Compare commits

...

70 Commits

Author SHA1 Message Date
Simon Larsen
2f517d8dcc feat: Update SCIM documentation URLs to use environment configuration 2025-08-05 11:00:18 +01:00
Simon Larsen
cb5c4dce45 feat: Add SCIM API documentation for user provisioning and deprovisioning 2025-08-05 10:52:57 +01:00
Simon Larsen
d9abeda60d feat: Refactor SCIM utility functions for improved modularity and logging 2025-08-05 10:33:38 +01:00
Simon Larsen
15c4c89310 feat: Add StatusPageSCIM model and related database migration
- Implemented StatusPageSCIM model with necessary fields and access controls.
- Created migration script to set up StatusPageSCIM table in the database.
- Developed StatusPageSCIMService for handling SCIM configurations, including bearer token generation.
- Added SCIM management page in the dashboard with functionalities for creating, editing, and resetting bearer tokens.
2025-08-05 10:07:24 +01:00
Simon Larsen
8c1d5652f4 feat: Change button style type for resetting bearer token to outline 2025-08-04 22:14:31 +01:00
Nawaz Dhandala
fbf87cf8d4 refactor: Add type annotations to formatUserForSCIM and resetBearerToken functions for improved type safety 2025-08-04 22:10:06 +01:00
Nawaz Dhandala
1c12ad94dd fix: Add type annotations for improved type safety in SCIM and Metrics modules 2025-08-04 22:07:10 +01:00
Nawaz Dhandala
aa09bab7c9 Refactor SCIM migrations and models; update formatting and improve readability
- Added missing comma in AllModelTypes array in Index.ts.
- Refactored MigrationName1754304193228 to improve query formatting and readability.
- Refactored MigrationName1754315774827 for consistency in formatting.
- Updated migration index file to include new migration.
- Standardized string quotes in Queue.ts for consistency.
- Cleaned up SCIMAuthorization.ts by removing unnecessary whitespace and improving log formatting.
- Refactored StartServer.ts to standardize content-type header handling.
- Improved formatting in SCIM.tsx for better readability and consistency.
- Refactored Metrics.ts to standardize queueSize extraction and type checking.
- Enhanced Probe.ts logging for clarity and consistency.
2025-08-04 21:36:11 +01:00
Simon Larsen
f7d1975ab0 feat: Add debug logging for parsing names from SCIM users 2025-08-04 21:35:30 +01:00
Simon Larsen
99c9a591cb feat: Refactor SCIM user handling to improve name parsing and team operations 2025-08-04 21:29:16 +01:00
Simon Larsen
c956d01789 feat: Enhance user name handling in SCIM responses by parsing full names into given and family names 2025-08-04 21:22:01 +01:00
Simon Larsen
17c829869b feat: Implement user activation handling by adding users to configured teams 2025-08-04 21:18:03 +01:00
Simon Larsen
d65e91a912 feat: Enhance SCIM user update logging and handle user deactivation by removing from teams 2025-08-04 21:17:28 +01:00
Simon Larsen
39710ba9b0 feat: Enhance SCIM user update and delete logging, and improve team removal logic 2025-08-04 21:12:49 +01:00
Simon Larsen
8c70a4dfae feat: Update SCIM user handling to improve pagination and remove duplicates 2025-08-04 18:00:20 +01:00
Simon Larsen
ff99055594 feat: Refactor SCIM endpoints to enhance logging and improve user query handling 2025-08-04 17:44:23 +01:00
Simon Larsen
f01cc2fd71 feat: Enhance logging for SCIM requests and responses across various endpoints 2025-08-04 17:34:28 +01:00
Simon Larsen
49b43593b1 feat: Add middleware to handle SCIM content type before JSON parsing 2025-08-04 17:25:01 +01:00
Simon Larsen
e293ffd0eb feat: Remove isEnabled column from ProjectSCIM and update related services and migrations 2025-08-04 14:58:25 +01:00
Simon Larsen
b62a5e7722 feat: Add functionality to reset Bearer Token with confirmation modals 2025-08-04 14:47:45 +01:00
Simon Larsen
8f8ba0abb8 feat: Enhance SCIM middleware logging and update SCIM page state management 2025-08-04 13:01:09 +01:00
Simon Larsen
5525556b54 feat: Rename ProjectScima to ProjectSCIM and update imports 2025-08-04 12:28:09 +01:00
Simon Larsen
669066b70a feat: Implement ProjectSCIM model and SCIM page functionality 2025-08-04 12:27:48 +01:00
Simon Larsen
76d2abed08 fix: Update SCIM endpoint URLs to include versioning 2025-08-04 12:01:50 +01:00
Simon Larsen
a6c18b3f21 fix: Remove HTTP_PROTOCOL from SCIM endpoint URLs in SCIMPage component 2025-08-04 12:01:24 +01:00
Simon Larsen
955ea7bc31 feat: Restore ProjectSCIM service with bearer token generation logic 2025-08-04 11:58:18 +01:00
Simon Larsen
45719d4656 feat: Reintroduce ProjectSCIM service with bearer token generation logic 2025-08-04 11:58:07 +01:00
Simon Larsen
796c94a261 fix: Correct import casing for ProjectSCIM across multiple files 2025-08-04 11:46:56 +01:00
Simon Larsen
d2fe822cb7 feat: Integrate ProjectSCIM model and service into the Base API feature set 2025-08-04 11:44:02 +01:00
Simon Larsen
289a369eab feat: Add migration for ProjectSCIM and ProjectScimTeam tables with foreign key constraints 2025-08-04 11:43:44 +01:00
Simon Larsen
6f07e3e119 feat: Update SCIM API endpoints to include versioning in the URL 2025-08-04 11:19:17 +01:00
Simon Larsen
8cdc1e9faf feat: Add SCIM API endpoints and middleware for user management and configuration 2025-08-04 11:09:52 +01:00
Simon Larsen
d4609a84ef feat: Implement Project SCIM service with bearer token generation 2025-08-04 10:15:34 +01:00
Simon Larsen
eb4a91a598 feat: Add SCIM settings page and routing to the dashboard 2025-08-04 10:14:47 +01:00
Simon Larsen
5bea404d6c feat: Add SCIM API integration to Identity feature set 2025-08-04 10:13:44 +01:00
Simon Larsen
df3f8b6a74 feat: Add optional stackTrace field to job data structures for enhanced error tracking 2025-08-03 12:59:51 +01:00
Simon Larsen
0c9d2c821a feat: Add advanced horizontal pod autoscaler configuration for improved scaling behavior 2025-08-02 13:05:05 +01:00
Simon Larsen
ba49aaf0c3 fix: Skip probe offline email notifications when billing is enabled 2025-08-02 12:36:50 +01:00
Simon Larsen
6ea5ad7fe8 fix: Update nextPingAt calculation to use a 2-minute offset for improved timing accuracy 2025-08-02 11:42:01 +01:00
Simon Larsen
962866d109 fix: Improve queue size extraction and handling in metrics endpoint 2025-08-01 20:58:58 +01:00
Simon Larsen
115216561c feat: Add ports configuration for OneUptime probe service 2025-08-01 20:36:30 +01:00
Simon Larsen
f709c90cc4 fix: Update probe port handling in KEDA ScaledObjects for improved configuration 2025-08-01 20:21:41 +01:00
Simon Larsen
d7f01b0189 fix: Update default port value in probe template for better configuration handling 2025-08-01 20:19:31 +01:00
Simon Larsen
c3eaa8995c fix ports 2025-08-01 20:19:10 +01:00
Simon Larsen
53b482b9f3 refactor: Update Helm templates to use new port structure in values.yaml 2025-08-01 20:13:30 +01:00
Simon Larsen
d52670f39c refactor: Update Helm templates to use new port structure in values.yaml 2025-08-01 18:22:05 +01:00
Simon Larsen
fdc1332b9e Merge branch 'master' of github.com:OneUptime/oneuptime 2025-08-01 16:17:09 +01:00
Simon Larsen
a937416663 fix: Update autoscaler condition to prevent conflicts with KEDA configuration 2025-08-01 16:17:05 +01:00
Nawaz Dhandala
546d41da81 fix: Clean up formatting and ensure consistent return structure in metrics endpoints 2025-08-01 16:13:05 +01:00
Simon Larsen
c4c6793b29 feat: Implement KEDA autoscaling configuration for probes and add metrics endpoints 2025-08-01 15:38:04 +01:00
Simon Larsen
c894b112e6 fix: Await monitorResource call to ensure proper error handling in incoming request processing 2025-08-01 14:34:17 +01:00
Simon Larsen
304baf1bb4 fix: Await monitorResource call to ensure proper error handling in probe response processing 2025-08-01 14:33:17 +01:00
Simon Larsen
9adea6b1ba feat: Remove Helm annotations for post-install and post-upgrade hooks from templates 2025-08-01 14:01:04 +01:00
Simon Larsen
5498521e02 feat: Add Helm annotations for post-install and post-upgrade hooks 2025-08-01 13:47:52 +01:00
Simon Larsen
9e97c6ddbc feat: Update autoscaler conditions for fluent-ingest, incoming-request-ingest, probe-ingest, and server-monitor-ingest templates 2025-08-01 13:23:39 +01:00
Nawaz Dhandala
63272e09f8 refactor: Simplify function parameter formatting and improve readability in various files 2025-08-01 10:45:55 +01:00
Simon Larsen
327c28afdc feat: Implement fluent ingest worker for processing queue jobs 2025-08-01 10:34:17 +01:00
Simon Larsen
896020b93b feat: Add KEDA autoscaling configuration for various ingests
- Introduced KEDA autoscaling configuration in values.yaml for probeIngest, fluentIngest, incomingRequestIngest, and serverMonitorIngest.
- Added endpoints for queue statistics, size, and failed jobs in IncomingRequestIngest and ProbeIngest APIs.
- Implemented asynchronous processing of incoming requests and probes using job queues.
- Created Metrics API for KEDA metrics integration in IncomingRequestIngest, ProbeIngest, and ServerMonitorIngest.
- Refactored IncomingRequest and Probe APIs to utilize queue services for processing.
- Added job processing logic for incoming requests and probes in respective job files.
- Implemented queue service classes for managing job addition and retrieval of queue statistics.
2025-08-01 10:29:02 +01:00
Simon Larsen
15a68472b0 feat: comment out ClusterKeyAuthorization import for KEDA debugging 2025-07-31 21:23:41 +01:00
Simon Larsen
0210480d97 feat: remove Prometheus metrics endpoint for KEDA debugging 2025-07-31 21:22:04 +01:00
Simon Larsen
72fdc06687 feat: temporarily disable authentication middleware for KEDA debugging in metrics endpoint 2025-07-31 20:52:10 +01:00
Simon Larsen
3710b81b9a feat: add replica count support for deployments in Helm templates 2025-07-31 20:03:25 +01:00
Simon Larsen
9fcb3dc2e0 feat: update cluster key handling for KEDA compatibility in authorization middleware and Helm chart 2025-07-31 19:50:25 +01:00
Simon Larsen
43e2ccf51a feat: improve secret handling in Helm chart for upgrade scenarios 2025-07-31 19:29:07 +01:00
Nawaz Dhandala
48c3d8603a fix: format code for better readability and consistency in MonitorResource and Metrics 2025-07-31 12:57:39 +01:00
Simon Larsen
9cfc912161 feat: enhance response messages for incoming request checks with time difference 2025-07-31 12:56:06 +01:00
Simon Larsen
29e3ee57ab feat: add metrics-api endpoint for queue size retrieval in KEDA autoscaling 2025-07-31 12:44:48 +01:00
Simon Larsen
be7e849822 feat: add KEDA ScaledObjects for OpenTelemetry Ingest with configurable metrics 2025-07-31 12:34:43 +01:00
Simon Larsen
59d76b601a feat: add KEDA autoscaling support for OpenTelemetry Ingest with configurable metrics 2025-07-31 12:18:57 +01:00
Simon Larsen
b77ef336b8 feat: add replica count configuration for multiple deployments in Helm templates 2025-07-31 11:47:03 +01:00
80 changed files with 5664 additions and 465 deletions

View File

@@ -583,6 +583,12 @@ import StatusPageAnnouncementTemplateService, {
Service as StatusPageAnnouncementTemplateServiceType,
} from "Common/Server/Services/StatusPageAnnouncementTemplateService";
// ProjectSCIM
import ProjectSCIM from "Common/Models/DatabaseModels/ProjectSCIM";
import ProjectSCIMService, {
Service as ProjectSCIMServiceType,
} from "Common/Server/Services/ProjectSCIMService";
// Open API Spec
import OpenAPI from "Common/Server/API/OpenAPI";
@@ -618,6 +624,15 @@ const BaseAPIFeatureSet: FeatureSet = {
).getRouter(),
);
// Project SCIM
app.use(
`/${APP_NAME.toLocaleLowerCase()}`,
new BaseAPI<ProjectSCIM, ProjectSCIMServiceType>(
ProjectSCIM,
ProjectSCIMService,
).getRouter(),
);
// status page announcement templates
app.use(
`/${APP_NAME.toLocaleLowerCase()}`,

View File

@@ -0,0 +1,663 @@
import SCIMMiddleware from "Common/Server/Middleware/SCIMAuthorization";
import UserService from "Common/Server/Services/UserService";
import TeamMemberService from "Common/Server/Services/TeamMemberService";
import Express, {
ExpressRequest,
ExpressResponse,
ExpressRouter,
OneUptimeRequest,
} from "Common/Server/Utils/Express";
import Response from "Common/Server/Utils/Response";
import logger from "Common/Server/Utils/Logger";
import ObjectID from "Common/Types/ObjectID";
import Email from "Common/Types/Email";
import Name from "Common/Types/Name";
import { JSONObject } from "Common/Types/JSON";
import TeamMember from "Common/Models/DatabaseModels/TeamMember";
import ProjectSCIM from "Common/Models/DatabaseModels/ProjectSCIM";
import BadRequestException from "Common/Types/Exception/BadRequestException";
import NotFoundException from "Common/Types/Exception/NotFoundException";
import OneUptimeDate from "Common/Types/Date";
import LIMIT_MAX, { LIMIT_PER_PROJECT } from "Common/Types/Database/LimitMax";
import Query from "Common/Types/BaseDatabase/Query";
import ProjectUser from "Common/Models/DatabaseModels/ProjectUser";
import QueryHelper from "Common/Server/Types/Database/QueryHelper";
import User from "Common/Models/DatabaseModels/User";
import {
parseNameFromSCIM,
formatUserForSCIM,
generateServiceProviderConfig,
generateUsersListResponse,
parseSCIMQueryParams,
logSCIMOperation,
} from "../Utils/SCIMUtils";
import { DocsClientUrl } from "Common/Server/EnvironmentConfig";
const router: ExpressRouter = Express.getRouter();

// Entry type of ProjectSCIM.teams, derived via indexed access so we do not
// need `any` (the Team model itself is not imported in this file).
type ConfiguredSCIMTeam = NonNullable<ProjectSCIM["teams"]>[number];

/**
 * Adds a user to, or removes a user from, every team configured on the
 * project's SCIM configuration.
 *
 * - "add": creates a TeamMember row for each configured team the user is not
 *   already a member of. The invitation is auto-accepted and hooks are
 *   skipped (`ignoreHooks: true`) so SCIM provisioning does not fire
 *   invite-related side effects.
 * - "remove": bulk-deletes the user's memberships in the configured teams.
 *
 * No-op when the SCIM configuration has no teams.
 *
 * @param operation - "add" or "remove".
 * @param projectId - Project the SCIM configuration belongs to.
 * @param userId - User being provisioned / deprovisioned.
 * @param scimConfig - SCIM configuration whose `teams` list drives the work.
 */
const handleUserTeamOperations: (
  operation: "add" | "remove",
  projectId: ObjectID,
  userId: ObjectID,
  scimConfig: ProjectSCIM,
) => Promise<void> = async (
  operation: "add" | "remove",
  projectId: ObjectID,
  userId: ObjectID,
  scimConfig: ProjectSCIM,
): Promise<void> => {
  const teamsIds: Array<ObjectID> =
    scimConfig.teams?.map((team: ConfiguredSCIMTeam) => {
      return team.id!;
    }) || [];

  if (teamsIds.length === 0) {
    logger.debug(`SCIM Team operations - no teams configured for SCIM`);
    return;
  }

  if (operation === "add") {
    logger.debug(
      `SCIM Team operations - adding user to ${teamsIds.length} configured teams`,
    );
    for (const team of scimConfig.teams || []) {
      // Skip creation when the user is already a member of this team.
      const existingMember: TeamMember | null =
        await TeamMemberService.findOneBy({
          query: {
            projectId: projectId,
            userId: userId,
            teamId: team.id!,
          },
          select: { _id: true },
          props: { isRoot: true },
        });

      if (!existingMember) {
        logger.debug(`SCIM Team operations - adding user to team: ${team.id}`);
        const teamMember: TeamMember = new TeamMember();
        teamMember.projectId = projectId;
        teamMember.userId = userId;
        teamMember.teamId = team.id!;
        // SCIM-provisioned members are treated as having accepted the invite.
        teamMember.hasAcceptedInvitation = true;
        teamMember.invitationAcceptedAt = OneUptimeDate.getCurrentDate();
        await TeamMemberService.create({
          data: teamMember,
          props: {
            isRoot: true,
            // Do not fire invite-related hooks for SCIM provisioning.
            ignoreHooks: true,
          },
        });
      } else {
        logger.debug(
          `SCIM Team operations - user already member of team: ${team.id}`,
        );
      }
    }
  } else if (operation === "remove") {
    logger.debug(
      `SCIM Team operations - removing user from ${teamsIds.length} configured teams`,
    );
    // Bulk delete across all configured teams in a single query.
    await TeamMemberService.deleteBy({
      query: {
        projectId: projectId,
        userId: userId,
        teamId: QueryHelper.any(teamsIds),
      },
      skip: 0,
      limit: LIMIT_PER_PROJECT,
      props: { isRoot: true },
    });
  }
};
// SCIM Service Provider Configuration - GET /scim/v2/ServiceProviderConfig
router.get(
"/scim/v2/:projectScimId/ServiceProviderConfig",
SCIMMiddleware.isAuthorizedSCIMRequest,
async (req: ExpressRequest, res: ExpressResponse): Promise<void> => {
try {
logSCIMOperation("ServiceProviderConfig", "project", req.params["projectScimId"]!);
const serviceProviderConfig: JSONObject = generateServiceProviderConfig(
req,
req.params["projectScimId"]!,
"project",
DocsClientUrl.toString()+"/identity/scim"
);
logger.debug("Project SCIM ServiceProviderConfig response prepared successfully");
return Response.sendJsonObjectResponse(req, res, serviceProviderConfig);
} catch (err) {
logger.error(err);
return Response.sendErrorResponse(req, res, err as BadRequestException);
}
},
);
// Basic Users endpoint - GET /scim/v2/Users
//
// Lists project users (members of any team in the project) as SCIM User
// resources. Supports the SCIM filter `userName eq "<email>"` and
// startIndex/count pagination (RFC 7644: startIndex is the 1-based index of
// the first result to return, not a page number).
router.get(
  "/scim/v2/:projectScimId/Users",
  SCIMMiddleware.isAuthorizedSCIMRequest,
  async (req: ExpressRequest, res: ExpressResponse): Promise<void> => {
    try {
      logSCIMOperation("Users list", "project", req.params["projectScimId"]!);

      const oneuptimeRequest: OneUptimeRequest = req as OneUptimeRequest;
      // Bearer token data is attached by the SCIM authorization middleware.
      const bearerData: JSONObject =
        oneuptimeRequest.bearerTokenData as JSONObject;
      const projectId: ObjectID = bearerData["projectId"] as ObjectID;

      // Parse query parameters
      const { startIndex, count } = parseSCIMQueryParams(req);
      const filter: string = req.query["filter"] as string;

      logSCIMOperation(
        "Users list",
        "project",
        req.params["projectScimId"]!,
        `startIndex: ${startIndex}, count: ${count}, filter: ${filter || "none"}`
      );

      // Build query for team members in this project
      const query: Query<ProjectUser> = {
        projectId: projectId,
      };

      // Handle SCIM filter for userName (only `userName eq "<email>"` is supported).
      if (filter) {
        const emailMatch: RegExpMatchArray | null = filter.match(
          /userName eq "([^"]+)"/i,
        );
        if (emailMatch) {
          const email: string = emailMatch[1]!;
          logSCIMOperation("Users list", "project", req.params["projectScimId"]!, `filter by email: ${email}`);
          if (email) {
            const user: User | null = await UserService.findOneBy({
              query: { email: new Email(email) },
              select: { _id: true },
              props: { isRoot: true },
            });
            if (user && user.id) {
              query.userId = user.id;
              logSCIMOperation("Users list", "project", req.params["projectScimId"]!, `found user with id: ${user.id}`);
            } else {
              // No matching user: return an empty SCIM list, not an error.
              logSCIMOperation("Users list", "project", req.params["projectScimId"]!, `user not found for email: ${email}`);
              return Response.sendJsonObjectResponse(req, res, generateUsersListResponse([], startIndex, 0));
            }
          }
        }
      }

      logSCIMOperation("Users list", "project", req.params["projectScimId"]!, `query built for projectId: ${projectId}`);

      // Get team members
      const teamMembers: Array<TeamMember> = await TeamMemberService.findBy({
        query: query,
        limit: LIMIT_MAX,
        skip: 0,
        props: { isRoot: true },
        select: {
          userId: true,
          user: {
            _id: true,
            email: true,
            name: true,
            createdAt: true,
            updatedAt: true,
          },
        },
      });

      // Format each membership's user as a SCIM resource.
      const usersInProjects: Array<JSONObject> = teamMembers
        .filter((tm: TeamMember) => {
          return tm.user && tm.user.id;
        })
        .map((tm: TeamMember) => {
          return formatUserForSCIM(tm.user!, req, req.params["projectScimId"]!, "project");
        });

      // Remove duplicates — a user can belong to several teams in the project.
      const uniqueUserIds: Set<string> = new Set<string>();
      const users: Array<JSONObject> = usersInProjects.filter(
        (user: JSONObject) => {
          if (uniqueUserIds.has(user["id"]?.toString() || "")) {
            return false;
          }
          uniqueUserIds.add(user["id"]?.toString() || "");
          return true;
        },
      );

      // Paginate. SCIM's startIndex is the 1-based index of the first result,
      // so the window is [startIndex - 1, startIndex - 1 + count).
      // (Previously this used (startIndex - 1) * count, which treated
      // startIndex as a page number and skipped/duplicated results.)
      const paginatedUsers: Array<JSONObject> = users.slice(
        startIndex - 1,
        startIndex - 1 + count,
      );

      logger.debug(`SCIM Users response prepared with ${users.length} users`);
      return Response.sendJsonObjectResponse(req, res, generateUsersListResponse(paginatedUsers, startIndex, users.length));
    } catch (err) {
      logger.error(err);
      return Response.sendErrorResponse(req, res, err as BadRequestException);
    }
  },
);
// Get Individual User - GET /scim/v2/Users/{id}
router.get(
  "/scim/v2/:projectScimId/Users/:userId",
  SCIMMiddleware.isAuthorizedSCIMRequest,
  async (req: ExpressRequest, res: ExpressResponse): Promise<void> => {
    try {
      logger.debug(
        `SCIM Get individual user request for userId: ${req.params["userId"]}, projectScimId: ${req.params["projectScimId"]}`,
      );

      const authorizedRequest: OneUptimeRequest = req as OneUptimeRequest;
      const tokenData: JSONObject =
        authorizedRequest.bearerTokenData as JSONObject;
      const projectId: ObjectID = tokenData["projectId"] as ObjectID;
      const userId: string = req.params["userId"]!;

      logger.debug(
        `SCIM Get user - projectId: ${projectId}, userId: ${userId}`,
      );

      if (!userId) {
        throw new BadRequestException("User ID is required");
      }

      // The user is only visible over SCIM if they hold a team membership
      // in this project.
      const membership: TeamMember | null = await TeamMemberService.findOneBy({
        query: {
          projectId: projectId,
          userId: new ObjectID(userId),
        },
        select: {
          userId: true,
          user: {
            _id: true,
            email: true,
            name: true,
            createdAt: true,
            updatedAt: true,
          },
        },
        props: { isRoot: true },
      });

      if (!membership || !membership.user) {
        logger.debug(
          `SCIM Get user - user not found or not part of project for userId: ${userId}`,
        );
        throw new NotFoundException(
          "User not found or not part of this project",
        );
      }

      logger.debug(`SCIM Get user - found user: ${membership.user.id}`);

      const scimUser: JSONObject = formatUserForSCIM(
        membership.user,
        req,
        req.params["projectScimId"]!,
        "project",
      );
      return Response.sendJsonObjectResponse(req, res, scimUser);
    } catch (err) {
      logger.error(err);
      return Response.sendErrorResponse(req, res, err as BadRequestException);
    }
  },
);
// Update User - PUT /scim/v2/Users/{id}
//
// SCIM replace operation. Updates the user's email/name and handles the
// `active` flag: inactive users are removed from the SCIM-configured teams,
// active users are (re-)added to them. Returns the updated (or unchanged)
// user as a SCIM resource.
router.put(
  "/scim/v2/:projectScimId/Users/:userId",
  SCIMMiddleware.isAuthorizedSCIMRequest,
  async (req: ExpressRequest, res: ExpressResponse): Promise<void> => {
    try {
      logger.debug(
        `SCIM Update user request for userId: ${req.params["userId"]}, projectScimId: ${req.params["projectScimId"]}`,
      );

      const oneuptimeRequest: OneUptimeRequest = req as OneUptimeRequest;
      const bearerData: JSONObject =
        oneuptimeRequest.bearerTokenData as JSONObject;
      const projectId: ObjectID = bearerData["projectId"] as ObjectID;
      const userId: string = req.params["userId"]!;
      const scimUser: JSONObject = req.body;

      logger.debug(
        `SCIM Update user - projectId: ${projectId}, userId: ${userId}`,
      );
      logger.debug(
        `Request body for SCIM Update user: ${JSON.stringify(scimUser, null, 2)}`,
      );

      if (!userId) {
        throw new BadRequestException("User ID is required");
      }

      // Check if user exists and is part of the project
      const projectUser: TeamMember | null = await TeamMemberService.findOneBy({
        query: {
          projectId: projectId,
          userId: new ObjectID(userId),
        },
        select: {
          userId: true,
          user: {
            _id: true,
            email: true,
            name: true,
            createdAt: true,
            updatedAt: true,
          },
        },
        props: { isRoot: true },
      });

      if (!projectUser || !projectUser.user) {
        logger.debug(
          `SCIM Update user - user not found or not part of project for userId: ${userId}`,
        );
        throw new NotFoundException(
          "User not found or not part of this project",
        );
      }

      // SCIM clients may send the email as `userName` or in the `emails` list.
      const email: string =
        (scimUser["userName"] as string) ||
        ((scimUser["emails"] as JSONObject[])?.[0]?.["value"] as string);
      const name: string = parseNameFromSCIM(scimUser);
      const active: boolean = scimUser["active"] as boolean;

      logger.debug(
        `SCIM Update user - email: ${email}, name: ${name}, active: ${active}`,
      );

      // Handle user deactivation by removing from teams
      if (active === false) {
        logger.debug(
          `SCIM Update user - user marked as inactive, removing from teams`,
        );
        const scimConfig: ProjectSCIM = bearerData["scimConfig"] as ProjectSCIM;
        await handleUserTeamOperations(
          "remove",
          projectId,
          new ObjectID(userId),
          scimConfig,
        );
        logger.debug(
          `SCIM Update user - user successfully removed from teams due to deactivation`,
        );
      }

      // Handle user activation by adding to teams
      if (active === true) {
        logger.debug(
          `SCIM Update user - user marked as active, adding to teams`,
        );
        const scimConfig: ProjectSCIM = bearerData["scimConfig"] as ProjectSCIM;
        await handleUserTeamOperations(
          "add",
          projectId,
          new ObjectID(userId),
          scimConfig,
        );
        logger.debug(
          `SCIM Update user - user successfully added to teams due to activation`,
        );
      }

      if (email || name) {
        // Typed partial update payload (was `any`): only email and name are
        // ever updated here.
        const updateData: { email?: Email; name?: Name } = {};
        if (email) {
          updateData.email = new Email(email);
        }
        if (name) {
          updateData.name = new Name(name);
        }

        logger.debug(
          `SCIM Update user - updating user with data: ${JSON.stringify(updateData)}`,
        );

        await UserService.updateOneById({
          id: new ObjectID(userId),
          data: updateData,
          props: { isRoot: true },
        });

        logger.debug(`SCIM Update user - user updated successfully`);

        // Fetch updated user
        const updatedUser: User | null = await UserService.findOneById({
          id: new ObjectID(userId),
          select: {
            _id: true,
            email: true,
            name: true,
            createdAt: true,
            updatedAt: true,
          },
          props: { isRoot: true },
        });

        if (updatedUser) {
          const user: JSONObject = formatUserForSCIM(updatedUser, req, req.params["projectScimId"]!, "project");
          return Response.sendJsonObjectResponse(req, res, user);
        }
      }

      logger.debug(
        `SCIM Update user - no updates made, returning existing user`,
      );
      // If no updates were made, return the existing user
      const user: JSONObject = formatUserForSCIM(projectUser.user, req, req.params["projectScimId"]!, "project");
      return Response.sendJsonObjectResponse(req, res, user);
    } catch (err) {
      logger.error(err);
      return Response.sendErrorResponse(req, res, err as BadRequestException);
    }
  },
);
// Groups endpoint - GET /scim/v2/Groups
//
// Exposes the SCIM-configured teams as SCIM Group resources. The `members`
// list is intentionally empty; membership is managed through the Users
// endpoints instead.
router.get(
  "/scim/v2/:projectScimId/Groups",
  SCIMMiddleware.isAuthorizedSCIMRequest,
  async (req: ExpressRequest, res: ExpressResponse): Promise<void> => {
    try {
      logger.debug(
        `SCIM Groups list request for projectScimId: ${req.params["projectScimId"]}`,
      );

      const oneuptimeRequest: OneUptimeRequest = req as OneUptimeRequest;
      const bearerData: JSONObject =
        oneuptimeRequest.bearerTokenData as JSONObject;
      const scimConfig: ProjectSCIM = bearerData["scimConfig"] as ProjectSCIM;

      logger.debug(
        `SCIM Groups - found ${scimConfig.teams?.length || 0} configured teams`,
      );

      // Return configured teams as groups. The team entry type is derived
      // from the ProjectSCIM model via indexed access to avoid `any`.
      const groups: JSONObject[] = (scimConfig.teams || []).map(
        (team: NonNullable<ProjectSCIM["teams"]>[number]) => {
          return {
            schemas: ["urn:ietf:params:scim:schemas:core:2.0:Group"],
            id: team.id?.toString(),
            displayName: team.name?.toString(),
            members: [],
            meta: {
              resourceType: "Group",
              location: `${req.protocol}://${req.get("host")}/scim/v2/${req.params["projectScimId"]}/Groups/${team.id?.toString()}`,
            },
          };
        },
      );

      return Response.sendJsonObjectResponse(req, res, {
        schemas: ["urn:ietf:params:scim:api:messages:2.0:ListResponse"],
        totalResults: groups.length,
        startIndex: 1,
        itemsPerPage: groups.length,
        Resources: groups,
      });
    } catch (err) {
      logger.error(err);
      return Response.sendErrorResponse(req, res, err as BadRequestException);
    }
  },
);
// Create User - POST /scim/v2/Users
//
// Provisions a user: reuses an existing account when one matches the email,
// otherwise creates one, then adds the user to the SCIM-configured teams.
router.post(
  "/scim/v2/:projectScimId/Users",
  SCIMMiddleware.isAuthorizedSCIMRequest,
  async (req: ExpressRequest, res: ExpressResponse): Promise<void> => {
    try {
      logger.debug(
        `SCIM Create user request for projectScimId: ${req.params["projectScimId"]}`,
      );

      const authorizedRequest: OneUptimeRequest = req as OneUptimeRequest;
      const tokenData: JSONObject =
        authorizedRequest.bearerTokenData as JSONObject;
      const projectId: ObjectID = tokenData["projectId"] as ObjectID;
      const scimConfig: ProjectSCIM = tokenData["scimConfig"] as ProjectSCIM;

      // Provisioning must be explicitly enabled on the SCIM configuration.
      if (!scimConfig.autoProvisionUsers) {
        throw new BadRequestException(
          "Auto-provisioning is disabled for this project",
        );
      }

      const requestBody: JSONObject = req.body;
      // SCIM clients may send the email as `userName` or in the `emails` list.
      const email: string =
        (requestBody["userName"] as string) ||
        ((requestBody["emails"] as JSONObject[])?.[0]?.["value"] as string);
      const name: string = parseNameFromSCIM(requestBody);

      logger.debug(`SCIM Create user - email: ${email}, name: ${name}`);

      if (!email) {
        throw new BadRequestException("userName or email is required");
      }

      // Reuse an existing account when one already exists for this email.
      let user: User | null = await UserService.findOneBy({
        query: { email: new Email(email) },
        select: {
          _id: true,
          email: true,
          name: true,
          createdAt: true,
          updatedAt: true,
        },
        props: { isRoot: true },
      });

      if (user) {
        logger.debug(
          `SCIM Create user - user already exists with id: ${user.id}`,
        );
      } else {
        logger.debug(
          `SCIM Create user - creating new user for email: ${email}`,
        );
        user = await UserService.createByEmail({
          email: new Email(email),
          name: name ? new Name(name) : new Name("Unknown"),
          isEmailVerified: true,
          generateRandomPassword: true,
          props: { isRoot: true },
        });
      }

      // Add the user to the SCIM-configured teams, if any.
      if (scimConfig.teams && scimConfig.teams.length > 0) {
        logger.debug(
          `SCIM Create user - adding user to ${scimConfig.teams.length} configured teams`,
        );
        await handleUserTeamOperations("add", projectId, user.id!, scimConfig);
      }

      const createdUser: JSONObject = formatUserForSCIM(
        user,
        req,
        req.params["projectScimId"]!,
        "project",
      );
      logger.debug(
        `SCIM Create user - returning created user with id: ${user.id}`,
      );

      res.status(201);
      return Response.sendJsonObjectResponse(req, res, createdUser);
    } catch (err) {
      logger.error(err);
      return Response.sendErrorResponse(req, res, err as BadRequestException);
    }
  },
);
// Delete User - DELETE /scim/v2/Users/{id}
//
// Deprovisions a user by detaching them from the SCIM-configured teams; the
// user account itself is left intact.
router.delete(
  "/scim/v2/:projectScimId/Users/:userId",
  SCIMMiddleware.isAuthorizedSCIMRequest,
  async (req: ExpressRequest, res: ExpressResponse): Promise<void> => {
    try {
      logger.debug(
        `SCIM Delete user request for userId: ${req.params["userId"]}, projectScimId: ${req.params["projectScimId"]}`,
      );

      const authorizedRequest: OneUptimeRequest = req as OneUptimeRequest;
      const tokenData: JSONObject =
        authorizedRequest.bearerTokenData as JSONObject;
      const projectId: ObjectID = tokenData["projectId"] as ObjectID;
      const scimConfig: ProjectSCIM = tokenData["scimConfig"] as ProjectSCIM;
      const userId: string = req.params["userId"]!;

      // Deprovisioning must be explicitly enabled on the SCIM configuration.
      if (!scimConfig.autoDeprovisionUsers) {
        logger.debug("SCIM Delete user - auto-deprovisioning is disabled");
        throw new BadRequestException(
          "Auto-deprovisioning is disabled for this project",
        );
      }

      if (!userId) {
        throw new BadRequestException("User ID is required");
      }

      logger.debug(
        `SCIM Delete user - removing user from all teams in project: ${projectId}`,
      );

      // Nothing to detach from when no teams are configured.
      if (!scimConfig.teams || scimConfig.teams.length === 0) {
        logger.debug("SCIM Delete user - no teams configured for SCIM");
        throw new BadRequestException("No teams configured for SCIM");
      }

      await handleUserTeamOperations(
        "remove",
        projectId,
        new ObjectID(userId),
        scimConfig,
      );

      logger.debug(
        `SCIM Delete user - user successfully deprovisioned from project`,
      );

      // NOTE(review): RFC 7644 expects 204 No Content with an empty body for
      // DELETE; a JSON body is sent here alongside the 204 — confirm SCIM
      // clients tolerate this.
      res.status(204);
      return Response.sendJsonObjectResponse(req, res, {
        message: "User deprovisioned",
      });
    } catch (err) {
      logger.error(err);
      return Response.sendErrorResponse(req, res, err as BadRequestException);
    }
  },
);
export default router;

View File

@@ -0,0 +1,488 @@
import SCIMMiddleware from "Common/Server/Middleware/SCIMAuthorization";
import StatusPagePrivateUserService from "Common/Server/Services/StatusPagePrivateUserService";
import Express, {
ExpressRequest,
ExpressResponse,
ExpressRouter,
OneUptimeRequest,
} from "Common/Server/Utils/Express";
import Response from "Common/Server/Utils/Response";
import logger from "Common/Server/Utils/Logger";
import ObjectID from "Common/Types/ObjectID";
import Email from "Common/Types/Email";
import { JSONObject } from "Common/Types/JSON";
import StatusPagePrivateUser from "Common/Models/DatabaseModels/StatusPagePrivateUser";
import StatusPageSCIM from "Common/Models/DatabaseModels/StatusPageSCIM";
import BadRequestException from "Common/Types/Exception/BadRequestException";
import NotFoundException from "Common/Types/Exception/NotFoundException";
import LIMIT_MAX, { LIMIT_PER_PROJECT } from "Common/Types/Database/LimitMax";
import {
formatUserForSCIM,
generateServiceProviderConfig,
logSCIMOperation,
} from "../Utils/SCIMUtils";
const router: ExpressRouter = Express.getRouter();
// SCIM Service Provider Configuration - GET /status-page-scim/v2/ServiceProviderConfig
router.get(
"/status-page-scim/v2/:statusPageScimId/ServiceProviderConfig",
SCIMMiddleware.isAuthorizedSCIMRequest,
async (req: ExpressRequest, res: ExpressResponse): Promise<void> => {
try {
logSCIMOperation("ServiceProviderConfig", "status-page", req.params["statusPageScimId"]!);
const serviceProviderConfig: JSONObject = generateServiceProviderConfig(
req,
req.params["statusPageScimId"]!,
"status-page"
);
return Response.sendJsonObjectResponse(req, res, serviceProviderConfig);
} catch (err) {
logger.error(err);
return Response.sendErrorResponse(req, res, err as BadRequestException);
}
},
);
// Status Page Users endpoint - GET /status-page-scim/v2/Users
//
// Lists the status page's private users as SCIM User resources with
// startIndex/count pagination (RFC 7644: startIndex is the 1-based index of
// the first result to return, not a page number).
router.get(
  "/status-page-scim/v2/:statusPageScimId/Users",
  SCIMMiddleware.isAuthorizedSCIMRequest,
  async (req: ExpressRequest, res: ExpressResponse): Promise<void> => {
    try {
      logger.debug(
        `Status Page SCIM Users list request for statusPageScimId: ${req.params["statusPageScimId"]}`,
      );

      const oneuptimeRequest: OneUptimeRequest = req as OneUptimeRequest;
      // Bearer token data is attached by the SCIM authorization middleware.
      const bearerData: JSONObject =
        oneuptimeRequest.bearerTokenData as JSONObject;
      const statusPageId: ObjectID = bearerData["statusPageId"] as ObjectID;

      // Parse query parameters: startIndex defaults to 1, count defaults to
      // 100 and is capped at LIMIT_PER_PROJECT.
      const startIndex: number = parseInt(req.query["startIndex"] as string) || 1;
      const count: number = Math.min(
        parseInt(req.query["count"] as string) || 100,
        LIMIT_PER_PROJECT,
      );

      logger.debug(
        `Status Page SCIM Users - statusPageId: ${statusPageId}, startIndex: ${startIndex}, count: ${count}`,
      );

      // Get all private users for this status page
      const statusPageUsers: Array<StatusPagePrivateUser> =
        await StatusPagePrivateUserService.findBy({
          query: {
            statusPageId: statusPageId,
          },
          select: {
            _id: true,
            email: true,
            createdAt: true,
            updatedAt: true,
          },
          skip: 0,
          limit: LIMIT_MAX,
          props: { isRoot: true },
        });

      logger.debug(
        `Status Page SCIM Users - found ${statusPageUsers.length} users`,
      );

      // Format users for SCIM
      const users: Array<JSONObject> = statusPageUsers.map(
        (user: StatusPagePrivateUser) => {
          return formatUserForSCIM(user, req, req.params["statusPageScimId"]!, "status-page");
        },
      );

      // Paginate. SCIM's startIndex is the 1-based index of the first result,
      // so the window is [startIndex - 1, startIndex - 1 + count).
      // (Previously this used (startIndex - 1) * count, which treated
      // startIndex as a page number and skipped/duplicated results.)
      const paginatedUsers: Array<JSONObject> = users.slice(
        startIndex - 1,
        startIndex - 1 + count,
      );

      logger.debug(`Status Page SCIM Users response prepared with ${users.length} users`);
      return Response.sendJsonObjectResponse(req, res, {
        schemas: ["urn:ietf:params:scim:api:messages:2.0:ListResponse"],
        totalResults: users.length,
        startIndex: startIndex,
        itemsPerPage: paginatedUsers.length,
        Resources: paginatedUsers,
      });
    } catch (err) {
      logger.error(err);
      return Response.sendErrorResponse(req, res, err as BadRequestException);
    }
  },
);
// Get Individual Status Page User - GET /status-page-scim/v2/Users/{id}
router.get(
  "/status-page-scim/v2/:statusPageScimId/Users/:userId",
  SCIMMiddleware.isAuthorizedSCIMRequest,
  async (req: ExpressRequest, res: ExpressResponse): Promise<void> => {
    try {
      logger.debug(
        `Status Page SCIM Get individual user request for userId: ${req.params["userId"]}, statusPageScimId: ${req.params["statusPageScimId"]}`,
      );

      // The auth middleware stashed the decoded bearer token on the request.
      const authedRequest: OneUptimeRequest = req as OneUptimeRequest;
      const tokenData: JSONObject = authedRequest.bearerTokenData as JSONObject;
      const statusPageId: ObjectID = tokenData["statusPageId"] as ObjectID;
      const userId: string = req.params["userId"]!;

      logger.debug(
        `Status Page SCIM Get user - statusPageId: ${statusPageId}, userId: ${userId}`,
      );

      if (!userId) {
        throw new BadRequestException("User ID is required");
      }

      // Scope the lookup to this status page so one page's token cannot read
      // another page's members.
      const privateUser: StatusPagePrivateUser | null =
        await StatusPagePrivateUserService.findOneBy({
          query: {
            statusPageId: statusPageId,
            _id: new ObjectID(userId),
          },
          select: {
            _id: true,
            email: true,
            createdAt: true,
            updatedAt: true,
          },
          props: { isRoot: true },
        });

      if (!privateUser) {
        logger.debug(
          `Status Page SCIM Get user - user not found for userId: ${userId}`,
        );
        throw new NotFoundException(
          "User not found or not part of this status page",
        );
      }

      const scimUser: JSONObject = formatUserForSCIM(
        privateUser,
        req,
        req.params["statusPageScimId"]!,
        "status-page",
      );

      logger.debug(
        `Status Page SCIM Get user - returning user with id: ${privateUser.id}`,
      );

      return Response.sendJsonObjectResponse(req, res, scimUser);
    } catch (err) {
      logger.error(err);
      return Response.sendErrorResponse(req, res, err as BadRequestException);
    }
  },
);
// Create Status Page User - POST /status-page-scim/v2/Users
// Provisions (or idempotently returns) a private user on the status page
// identified by the bearer token. Always responds 201 per SCIM convention.
router.post(
  "/status-page-scim/v2/:statusPageScimId/Users",
  SCIMMiddleware.isAuthorizedSCIMRequest,
  async (req: ExpressRequest, res: ExpressResponse): Promise<void> => {
    try {
      logger.debug(
        `Status Page SCIM Create user request for statusPageScimId: ${req.params["statusPageScimId"]}`,
      );
      // Bearer-token payload is attached to the request by
      // SCIMMiddleware.isAuthorizedSCIMRequest.
      const oneuptimeRequest: OneUptimeRequest = req as OneUptimeRequest;
      const bearerData: JSONObject =
        oneuptimeRequest.bearerTokenData as JSONObject;
      const statusPageId: ObjectID = bearerData["statusPageId"] as ObjectID;
      const scimConfig: StatusPageSCIM = bearerData["scimConfig"] as StatusPageSCIM;
      // Honor the per-configuration toggle: refuse creation when
      // auto-provisioning is switched off.
      if (!scimConfig.autoProvisionUsers) {
        throw new BadRequestException(
          "Auto-provisioning is disabled for this status page",
        );
      }
      const scimUser: JSONObject = req.body;
      logger.debug(
        `Status Page SCIM Create user - statusPageId: ${statusPageId}`,
      );
      logger.debug(
        `Request body for Status Page SCIM Create user: ${JSON.stringify(scimUser, null, 2)}`,
      );
      // Extract user data from SCIM payload
      // (userName is preferred; falls back to the first entry of `emails`).
      const email: string =
        (scimUser["userName"] as string) ||
        ((scimUser["emails"] as JSONObject[])?.[0]?.["value"] as string);
      if (!email) {
        throw new BadRequestException("Email is required for user creation");
      }
      logger.debug(
        `Status Page SCIM Create user - email: ${email}`,
      );
      // Check if user already exists for this status page — this makes the
      // endpoint idempotent: repeated POSTs return the same user.
      let user: StatusPagePrivateUser | null = await StatusPagePrivateUserService.findOneBy({
        query: {
          statusPageId: statusPageId,
          email: new Email(email),
        },
        select: {
          _id: true,
          email: true,
          createdAt: true,
          updatedAt: true,
        },
        props: { isRoot: true },
      });
      if (!user) {
        logger.debug(
          `Status Page SCIM Create user - creating new user with email: ${email}`,
        );
        // Create new status page private user
        // NOTE(review): `as any` bypasses the model's compile-time field
        // checks — confirm the service supplies any remaining defaults.
        user = await StatusPagePrivateUserService.create({
          data: {
            statusPageId: statusPageId,
            email: new Email(email),
          } as any,
          props: { isRoot: true },
        });
      } else {
        logger.debug(
          `Status Page SCIM Create user - user already exists with id: ${user.id}`,
        );
      }
      const createdUser: JSONObject = formatUserForSCIM(user, req, req.params["statusPageScimId"]!, "status-page");
      logger.debug(
        `Status Page SCIM Create user - returning created user with id: ${user.id}`,
      );
      // 201 Created is returned even when the user already existed.
      res.status(201);
      return Response.sendJsonObjectResponse(req, res, createdUser);
    } catch (err) {
      logger.error(err);
      return Response.sendErrorResponse(req, res, err as BadRequestException);
    }
  },
);
// Update Status Page User - PUT /status-page-scim/v2/Users/{id}
// Replaces a user's SCIM representation. `active: false` deprovisions the
// user (when allowed); otherwise only the email can change.
router.put(
  "/status-page-scim/v2/:statusPageScimId/Users/:userId",
  SCIMMiddleware.isAuthorizedSCIMRequest,
  async (req: ExpressRequest, res: ExpressResponse): Promise<void> => {
    try {
      logger.debug(
        `Status Page SCIM Update user request for userId: ${req.params["userId"]}, statusPageScimId: ${req.params["statusPageScimId"]}`,
      );
      // Bearer-token payload is attached by the SCIM auth middleware.
      const oneuptimeRequest: OneUptimeRequest = req as OneUptimeRequest;
      const bearerData: JSONObject =
        oneuptimeRequest.bearerTokenData as JSONObject;
      const statusPageId: ObjectID = bearerData["statusPageId"] as ObjectID;
      const userId: string = req.params["userId"]!;
      const scimUser: JSONObject = req.body;
      logger.debug(
        `Status Page SCIM Update user - statusPageId: ${statusPageId}, userId: ${userId}`,
      );
      logger.debug(
        `Request body for Status Page SCIM Update user: ${JSON.stringify(scimUser, null, 2)}`,
      );
      if (!userId) {
        throw new BadRequestException("User ID is required");
      }
      // Check if user exists and belongs to this status page (lookup is
      // scoped by statusPageId so tokens cannot touch other pages' users).
      const statusPageUser: StatusPagePrivateUser | null = await StatusPagePrivateUserService.findOneBy({
        query: {
          statusPageId: statusPageId,
          _id: new ObjectID(userId),
        },
        select: {
          _id: true,
          email: true,
          createdAt: true,
          updatedAt: true,
        },
        props: { isRoot: true },
      });
      if (!statusPageUser) {
        logger.debug(
          `Status Page SCIM Update user - user not found for userId: ${userId}`,
        );
        throw new NotFoundException(
          "User not found or not part of this status page",
        );
      }
      // Update user information
      // (userName preferred; falls back to the first entry of `emails`).
      const email: string =
        (scimUser["userName"] as string) ||
        ((scimUser["emails"] as JSONObject[])?.[0]?.["value"] as string);
      const active: boolean = scimUser["active"] as boolean;
      logger.debug(
        `Status Page SCIM Update user - email: ${email}, active: ${active}`,
      );
      // Handle user deactivation by deleting from status page.
      // NOTE: when auto-deprovisioning is disabled the inactive flag is
      // effectively ignored and execution continues with the email update.
      if (active === false) {
        logger.debug(
          `Status Page SCIM Update user - user marked as inactive, removing from status page`,
        );
        const scimConfig: StatusPageSCIM = bearerData["scimConfig"] as StatusPageSCIM;
        if (scimConfig.autoDeprovisionUsers) {
          await StatusPagePrivateUserService.deleteOneById({
            id: new ObjectID(userId),
            props: { isRoot: true },
          });
          logger.debug(
            `Status Page SCIM Update user - user removed from status page`,
          );
          // Return empty response for deleted user
          return Response.sendJsonObjectResponse(req, res, {});
        }
      }
      // Prepare update data — only the email is mutable through SCIM here.
      const updateData: {
        email?: Email;
      } = {};
      if (email && email !== statusPageUser.email?.toString()) {
        updateData.email = new Email(email);
      }
      // Only update if there are changes
      if (Object.keys(updateData).length > 0) {
        logger.debug(
          `Status Page SCIM Update user - updating user with data: ${JSON.stringify(updateData)}`,
        );
        await StatusPagePrivateUserService.updateOneById({
          id: new ObjectID(userId),
          data: updateData,
          props: { isRoot: true },
        });
        logger.debug(`Status Page SCIM Update user - user updated successfully`);
        // Fetch updated user so the response reflects persisted state.
        const updatedUser: StatusPagePrivateUser | null = await StatusPagePrivateUserService.findOneById({
          id: new ObjectID(userId),
          select: {
            _id: true,
            email: true,
            createdAt: true,
            updatedAt: true,
          },
          props: { isRoot: true },
        });
        if (updatedUser) {
          const user: JSONObject = formatUserForSCIM(updatedUser, req, req.params["statusPageScimId"]!, "status-page");
          return Response.sendJsonObjectResponse(req, res, user);
        }
      }
      logger.debug(
        `Status Page SCIM Update user - no updates made, returning existing user`,
      );
      // If no updates were made, return the existing user
      const user: JSONObject = formatUserForSCIM(statusPageUser, req, req.params["statusPageScimId"]!, "status-page");
      return Response.sendJsonObjectResponse(req, res, user);
    } catch (err) {
      logger.error(err);
      return Response.sendErrorResponse(req, res, err as BadRequestException);
    }
  },
);
// Delete Status Page User - DELETE /status-page-scim/v2/Users/{id}
router.delete(
  "/status-page-scim/v2/:statusPageScimId/Users/:userId",
  SCIMMiddleware.isAuthorizedSCIMRequest,
  async (req: ExpressRequest, res: ExpressResponse): Promise<void> => {
    try {
      logger.debug(
        `Status Page SCIM Delete user request for userId: ${req.params["userId"]}, statusPageScimId: ${req.params["statusPageScimId"]}`,
      );

      // Decoded bearer token was attached by the SCIM auth middleware.
      const authedRequest: OneUptimeRequest = req as OneUptimeRequest;
      const tokenData: JSONObject = authedRequest.bearerTokenData as JSONObject;
      const statusPageId: ObjectID = tokenData["statusPageId"] as ObjectID;
      const scimConfig: StatusPageSCIM = tokenData["scimConfig"] as StatusPageSCIM;
      const userId: string = req.params["userId"]!;

      // Deletion is only honored when the configuration opts in.
      if (!scimConfig.autoDeprovisionUsers) {
        throw new BadRequestException(
          "Auto-deprovisioning is disabled for this status page",
        );
      }

      logger.debug(
        `Status Page SCIM Delete user - statusPageId: ${statusPageId}, userId: ${userId}`,
      );

      if (!userId) {
        throw new BadRequestException("User ID is required");
      }

      // Confirm the user belongs to this status page before deleting.
      const existingUser: StatusPagePrivateUser | null =
        await StatusPagePrivateUserService.findOneBy({
          query: {
            statusPageId: statusPageId,
            _id: new ObjectID(userId),
          },
          select: {
            _id: true,
          },
          props: { isRoot: true },
        });

      if (!existingUser) {
        logger.debug(
          `Status Page SCIM Delete user - user not found for userId: ${userId}`,
        );
        // SCIM spec says to return 404 for non-existent resources
        throw new NotFoundException("User not found");
      }

      // Delete the user from status page.
      await StatusPagePrivateUserService.deleteOneById({
        id: new ObjectID(userId),
        props: { isRoot: true },
      });

      logger.debug(
        `Status Page SCIM Delete user - user deleted successfully for userId: ${userId}`,
      );

      // Return 204 No Content for successful deletion
      res.status(204);
      return Response.sendEmptySuccessResponse(req, res);
    } catch (err) {
      logger.error(err);
      return Response.sendErrorResponse(req, res, err as BadRequestException);
    }
  },
);

export default router;

View File

@@ -1,8 +1,10 @@
import AuthenticationAPI from "./API/Authentication";
import ResellerAPI from "./API/Reseller";
import SsoAPI from "./API/SSO";
import SCIMAPI from "./API/SCIM";
import StatusPageAuthenticationAPI from "./API/StatusPageAuthentication";
import StatusPageSsoAPI from "./API/StatusPageSSO";
import StatusPageSCIMAPI from "./API/StatusPageSCIM";
import FeatureSet from "Common/Server/Types/FeatureSet";
import Express, { ExpressApplication } from "Common/Server/Utils/Express";
import "ejs";
@@ -19,6 +21,10 @@ const IdentityFeatureSet: FeatureSet = {
app.use([`/${APP_NAME}`, "/"], SsoAPI);
app.use([`/${APP_NAME}`, "/"], SCIMAPI);
app.use([`/${APP_NAME}`, "/"], StatusPageSCIMAPI);
app.use([`/${APP_NAME}`, "/"], StatusPageSsoAPI);
app.use(

View File

@@ -0,0 +1,262 @@
import { ExpressRequest } from "Common/Server/Utils/Express";
import logger from "Common/Server/Utils/Logger";
import { JSONObject } from "Common/Types/JSON";
import Email from "Common/Types/Email";
import Name from "Common/Types/Name";
import ObjectID from "Common/Types/ObjectID";
/**
 * Shared SCIM utility functions for both Project SCIM and Status Page SCIM
 */

// Base interface for SCIM user-like objects - compatible with User model
// and StatusPagePrivateUser: only the fields SCIM responses need.
export interface SCIMUser {
  // Primary key of the underlying model; may be absent before persistence.
  id?: ObjectID | null;
  // Email address; doubles as the SCIM userName in responses.
  email?: Email;
  // Full name; some models store a Name object, others a plain string.
  name?: Name | string;
  // Timestamps surfaced in the SCIM `meta` block.
  createdAt?: Date;
  updatedAt?: Date;
}
/**
 * Parse name information from a SCIM user payload.
 * Preference order: `name.formatted`, then `givenName familyName`,
 * then `displayName`, then an empty string.
 */
export const parseNameFromSCIM: (scimUser: JSONObject) => string = (
  scimUser: JSONObject,
): string => {
  logger.debug(
    `SCIM - Parsing name from SCIM user: ${JSON.stringify(scimUser, null, 2)}`,
  );

  const nameField: JSONObject | undefined = scimUser["name"] as JSONObject;

  const formatted: string = nameField?.["formatted"] as string;
  if (formatted) {
    return formatted;
  }

  const given: string = (nameField?.["givenName"] as string) || "";
  const family: string = (nameField?.["familyName"] as string) || "";
  if (given || family) {
    return `${given} ${family}`.trim();
  }

  if (scimUser["displayName"]) {
    return scimUser["displayName"] as string;
  }

  return "";
};
/**
 * Split a full name into the SCIM `name` sub-attributes.
 * The first whitespace-separated token becomes givenName; the remainder,
 * re-joined with single spaces, becomes familyName.
 */
export const parseNameToSCIMFormat: (fullName: string) => {
  givenName: string;
  familyName: string;
  formatted: string;
} = (
  fullName: string,
): { givenName: string; familyName: string; formatted: string } => {
  const [first = "", ...rest] = fullName.trim().split(/\s+/);
  return {
    givenName: first,
    familyName: rest.join(" ") || "",
    formatted: fullName,
  };
};
/**
 * Build the SCIM `User` resource representation for a user-like model.
 * `scimType` selects which API prefix the `meta.location` URL uses.
 */
export const formatUserForSCIM: (
  user: SCIMUser,
  req: ExpressRequest,
  scimId: string,
  scimType: "project" | "status-page",
) => JSONObject = (
  user: SCIMUser,
  req: ExpressRequest,
  scimId: string,
  scimType: "project" | "status-page",
): JSONObject => {
  const origin: string = `${req.protocol}://${req.get("host")}`;
  const email: string = user.email?.toString() || "";

  // Display name falls back to the mailbox part of the email, then a
  // placeholder.
  const fullName: string =
    user.name?.toString() || email.split("@")[0] || "Unknown User";
  const scimName: { givenName: string; familyName: string; formatted: string } =
    parseNameToSCIMFormat(fullName);

  // Pick the API prefix that matches the SCIM flavor.
  const basePath: string =
    scimType === "project"
      ? `/scim/v2/${scimId}`
      : `/status-page-scim/v2/${scimId}`;

  return {
    schemas: ["urn:ietf:params:scim:schemas:core:2.0:User"],
    id: user.id?.toString(),
    userName: email,
    displayName: scimName.formatted,
    name: {
      formatted: scimName.formatted,
      familyName: scimName.familyName,
      givenName: scimName.givenName,
    },
    emails: [
      {
        value: email,
        type: "work",
        primary: true,
      },
    ],
    active: true,
    meta: {
      resourceType: "User",
      created: user.createdAt?.toISOString(),
      lastModified: user.updatedAt?.toISOString(),
      location: `${origin}${basePath}/Users/${user.id?.toString()}`,
    },
  };
};
/**
 * Extract the email address from a SCIM user payload.
 * Prefers `userName` (most IdPs set it to the email) over the first entry
 * of the `emails` array; returns "" when neither is present.
 */
export const extractEmailFromSCIM: (scimUser: JSONObject) => string = (
  scimUser: JSONObject,
): string => {
  if (scimUser["userName"]) {
    return scimUser["userName"] as string;
  }
  const emails: JSONObject[] = scimUser["emails"] as JSONObject[];
  return (emails?.[0]?.["value"] as string) || "";
};
/**
 * Extract the `active` flag from a SCIM user payload.
 * Only an explicit `active: false` deactivates; anything else (true,
 * missing, or non-boolean) counts as active.
 */
export const extractActiveFromSCIM: (scimUser: JSONObject) => boolean = (
  scimUser: JSONObject,
): boolean => {
  const active: unknown = scimUser["active"];
  return active !== false;
};
/**
 * Generate the SCIM ServiceProviderConfig document (RFC 7643 section 5)
 * advertising this server's capabilities. `scimType` selects the API
 * prefix used for `meta.location`.
 */
export const generateServiceProviderConfig: (
  req: ExpressRequest,
  scimId: string,
  scimType: "project" | "status-page",
  documentationUrl?: string,
) => JSONObject = (
  req: ExpressRequest,
  scimId: string,
  scimType: "project" | "status-page",
  documentationUrl: string = "https://oneuptime.com/docs/identity/scim",
): JSONObject => {
  const origin: string = `${req.protocol}://${req.get("host")}`;

  let basePath: string;
  if (scimType === "project") {
    basePath = `/scim/v2/${scimId}`;
  } else {
    basePath = `/status-page-scim/v2/${scimId}`;
  }

  return {
    schemas: [
      "urn:ietf:params:scim:schemas:core:2.0:ServiceProviderConfig",
    ],
    documentationUri: documentationUrl,
    patch: {
      supported: true,
    },
    bulk: {
      supported: true,
      maxOperations: 1000,
      maxPayloadSize: 1048576,
    },
    filter: {
      supported: true,
      maxResults: 200,
    },
    changePassword: {
      supported: false,
    },
    sort: {
      supported: true,
    },
    etag: {
      supported: false,
    },
    authenticationSchemes: [
      {
        type: "httpbearer",
        name: "HTTP Bearer",
        description: "Authentication scheme using HTTP Bearer Token",
        primary: true,
      },
    ],
    meta: {
      location: `${origin}${basePath}/ServiceProviderConfig`,
      resourceType: "ServiceProviderConfig",
      created: "2023-01-01T00:00:00Z",
      lastModified: "2023-01-01T00:00:00Z",
    },
  };
};
/**
 * Wrap already-formatted SCIM users in the standard ListResponse envelope
 * (RFC 7644 section 3.4.2).
 */
export const generateUsersListResponse: (
  users: JSONObject[],
  startIndex: number,
  totalResults: number,
) => JSONObject = (
  users: JSONObject[],
  startIndex: number,
  totalResults: number,
): JSONObject => {
  const envelope: JSONObject = {
    schemas: ["urn:ietf:params:scim:api:messages:2.0:ListResponse"],
    totalResults: totalResults,
    startIndex: startIndex,
    itemsPerPage: users.length,
    Resources: users,
  };
  return envelope;
};
/**
 * Parse pagination query parameters for SCIM list requests.
 *
 * Per RFC 7644 section 3.4.2.4: `startIndex` is 1-based and any value less
 * than 1 is interpreted as 1; a negative `count` is interpreted as 0 and the
 * maximum page size is capped at 200 (SCIM recommended max).
 *
 * FIX: the previous version let negative values from parseInt pass through
 * (e.g. startIndex "-5" stayed -5 and a negative count won the Math.min).
 */
export const parseSCIMQueryParams: (req: ExpressRequest) => {
  startIndex: number;
  count: number;
} = (req: ExpressRequest): { startIndex: number; count: number } => {
  // Non-numeric input makes parseInt return NaN (falsy) -> default applies.
  const rawStartIndex: number =
    parseInt(req.query["startIndex"] as string) || 1;
  const startIndex: number = Math.max(rawStartIndex, 1);

  const rawCount: number = parseInt(req.query["count"] as string) || 100;
  const count: number = Math.min(Math.max(rawCount, 0), 200);

  return { startIndex, count };
};
/**
 * Log a SCIM operation with a consistent, flavor-specific prefix so
 * project-level and status-page SCIM traffic can be told apart in logs.
 */
export const logSCIMOperation: (
  operation: string,
  scimType: "project" | "status-page",
  scimId: string,
  details?: string,
) => void = (
  operation: string,
  scimType: "project" | "status-page",
  scimId: string,
  details?: string,
): void => {
  const prefix: string =
    scimType === "project" ? "Project SCIM" : "Status Page SCIM";
  const suffix: string = details ? `, ${details}` : "";
  logger.debug(`${prefix} ${operation} - scimId: ${scimId}${suffix}`);
};

View File

@@ -114,6 +114,7 @@ import StatusPageOwnerTeam from "./StatusPageOwnerTeam";
import StatusPageOwnerUser from "./StatusPageOwnerUser";
import StatusPagePrivateUser from "./StatusPagePrivateUser";
import StatusPageResource from "./StatusPageResource";
import StatusPageSCIM from "./StatusPageSCIM";
import StatusPageSSO from "./StatusPageSso";
import StatusPageSubscriber from "./StatusPageSubscriber";
// Team
@@ -179,6 +180,7 @@ import ProjectUser from "./ProjectUser";
import OnCallDutyPolicyUserOverride from "./OnCallDutyPolicyUserOverride";
import MonitorFeed from "./MonitorFeed";
import MetricType from "./MetricType";
import ProjectSCIM from "./ProjectSCIM";
const AllModelTypes: Array<{
new (): BaseModel;
@@ -276,6 +278,7 @@ const AllModelTypes: Array<{
ProjectSSO,
StatusPageSSO,
StatusPageSCIM,
MonitorProbe,
@@ -380,6 +383,10 @@ const AllModelTypes: Array<{
MetricType,
OnCallDutyPolicyTimeLog,
ProjectSCIM,
StatusPageSCIM
];
const modelTypeMap: { [key: string]: { new (): BaseModel } } = {};

View File

@@ -0,0 +1,451 @@
import Project from "./Project";
import Team from "./Team";
import User from "./User";
import BaseModel from "./DatabaseBaseModel/DatabaseBaseModel";
import Route from "../../Types/API/Route";
import { PlanType } from "../../Types/Billing/SubscriptionPlan";
import ColumnAccessControl from "../../Types/Database/AccessControl/ColumnAccessControl";
import TableAccessControl from "../../Types/Database/AccessControl/TableAccessControl";
import TableBillingAccessControl from "../../Types/Database/AccessControl/TableBillingAccessControl";
import ColumnLength from "../../Types/Database/ColumnLength";
import ColumnType from "../../Types/Database/ColumnType";
import CrudApiEndpoint from "../../Types/Database/CrudApiEndpoint";
import TableColumn from "../../Types/Database/TableColumn";
import TableColumnType from "../../Types/Database/TableColumnType";
import TableMetadata from "../../Types/Database/TableMetadata";
import TenantColumn from "../../Types/Database/TenantColumn";
import UniqueColumnBy from "../../Types/Database/UniqueColumnBy";
import IconProp from "../../Types/Icon/IconProp";
import ObjectID from "../../Types/ObjectID";
import Permission from "../../Types/Permission";
import {
Column,
Entity,
Index,
JoinColumn,
JoinTable,
ManyToMany,
ManyToOne,
} from "typeorm";
// SCIM availability is gated behind the Scale plan for all operations.
@TableBillingAccessControl({
  create: PlanType.Scale,
  read: PlanType.Scale,
  update: PlanType.Scale,
  delete: PlanType.Scale,
})
@TenantColumn("projectId")
// Table-level access reuses the Project SSO permission set — there are no
// dedicated SCIM permissions (yet).
@TableAccessControl({
  create: [
    Permission.ProjectOwner,
    Permission.ProjectAdmin,
    Permission.CreateProjectSSO,
  ],
  read: [
    Permission.ProjectOwner,
    Permission.ProjectAdmin,
    Permission.ProjectMember,
    Permission.ReadProjectSSO,
  ],
  delete: [
    Permission.ProjectOwner,
    Permission.ProjectAdmin,
    Permission.DeleteProjectSSO,
  ],
  update: [
    Permission.ProjectOwner,
    Permission.ProjectAdmin,
    Permission.EditProjectSSO,
  ],
})
@CrudApiEndpoint(new Route("/project-scim"))
@TableMetadata({
  tableName: "ProjectSCIM",
  singularName: "SCIM",
  pluralName: "SCIM",
  icon: IconProp.Lock,
  tableDescription: "Manage SCIM auto-provisioning for your project",
})
@Entity({
  name: "ProjectSCIM",
})
// SCIM provisioning configuration for a project: stores the bearer token an
// identity provider presents to the SCIM API, the provision/deprovision
// toggles, and the default teams new users are added to.
export default class ProjectSCIM extends BaseModel {
  // Relation to the owning project; rows are removed when the project is
  // deleted (onDelete CASCADE).
  @ColumnAccessControl({
    create: [
      Permission.ProjectOwner,
      Permission.ProjectAdmin,
      Permission.CreateProjectSSO,
    ],
    read: [
      Permission.ProjectOwner,
      Permission.ProjectAdmin,
      Permission.ProjectMember,
      Permission.ReadProjectSSO,
    ],
    update: [],
  })
  @TableColumn({
    manyToOneRelationColumn: "projectId",
    type: TableColumnType.Entity,
    modelType: Project,
    title: "Project",
    description: "Relation to Project Resource in which this object belongs",
  })
  @ManyToOne(
    () => {
      return Project;
    },
    {
      eager: false,
      nullable: true,
      onDelete: "CASCADE",
      orphanedRowAction: "nullify",
    },
  )
  @JoinColumn({ name: "projectId" })
  public project?: Project = undefined;

  // Tenant key (see @TenantColumn above); immutable after creation
  // (update access list is empty).
  @ColumnAccessControl({
    create: [
      Permission.ProjectOwner,
      Permission.ProjectAdmin,
      Permission.CreateProjectSSO,
    ],
    read: [
      Permission.ProjectOwner,
      Permission.ProjectAdmin,
      Permission.ProjectMember,
      Permission.ReadProjectSSO,
    ],
    update: [],
  })
  @Index()
  @TableColumn({
    type: TableColumnType.ObjectID,
    required: true,
    canReadOnRelationQuery: true,
    title: "Project ID",
    description: "ID of your OneUptime Project in which this object belongs",
  })
  @Column({
    type: ColumnType.ObjectID,
    nullable: false,
    transformer: ObjectID.getDatabaseTransformer(),
  })
  public projectId?: ObjectID = undefined;

  // Human-friendly label; must be unique within a project (@UniqueColumnBy).
  @ColumnAccessControl({
    create: [
      Permission.ProjectOwner,
      Permission.ProjectAdmin,
      Permission.CreateProjectSSO,
    ],
    read: [
      Permission.ProjectOwner,
      Permission.ProjectAdmin,
      Permission.ProjectMember,
      Permission.ReadProjectSSO,
    ],
    update: [
      Permission.ProjectOwner,
      Permission.ProjectAdmin,
      Permission.EditProjectSSO,
    ],
  })
  @TableColumn({
    required: true,
    type: TableColumnType.ShortText,
    canReadOnRelationQuery: true,
    title: "Name",
    description: "Any friendly name for this SCIM configuration",
  })
  @Column({
    nullable: false,
    type: ColumnType.ShortText,
    length: ColumnLength.ShortText,
  })
  @UniqueColumnBy("projectId")
  public name?: string = undefined;

  // Optional free-text description.
  @ColumnAccessControl({
    create: [
      Permission.ProjectOwner,
      Permission.ProjectAdmin,
      Permission.CreateProjectSSO,
    ],
    read: [
      Permission.ProjectOwner,
      Permission.ProjectAdmin,
      Permission.ProjectMember,
      Permission.ReadProjectSSO,
    ],
    update: [
      Permission.ProjectOwner,
      Permission.ProjectAdmin,
      Permission.EditProjectSSO,
    ],
  })
  @TableColumn({
    required: false,
    type: TableColumnType.LongText,
    title: "Description",
    description: "Friendly description to help you remember",
  })
  @Column({
    nullable: true,
    type: ColumnType.LongText,
    length: ColumnLength.LongText,
  })
  public description?: string = undefined;

  // Secret used by the IdP to authenticate SCIM requests. Note: unlike the
  // other columns, read access deliberately excludes ProjectMember.
  @ColumnAccessControl({
    create: [
      Permission.ProjectOwner,
      Permission.ProjectAdmin,
      Permission.CreateProjectSSO,
    ],
    read: [
      Permission.ProjectOwner,
      Permission.ProjectAdmin,
      Permission.ReadProjectSSO,
    ],
    update: [
      Permission.ProjectOwner,
      Permission.ProjectAdmin,
      Permission.EditProjectSSO,
    ],
  })
  @TableColumn({
    required: true,
    type: TableColumnType.LongText,
    title: "Bearer Token",
    description: "Bearer token for SCIM authentication. Keep this secure.",
  })
  @Column({
    nullable: false,
    type: ColumnType.LongText,
    length: ColumnLength.LongText,
  })
  public bearerToken?: string = undefined;

  // Teams that SCIM-provisioned users are added to by default
  // (many-to-many via the ProjectScimTeam join table).
  @ColumnAccessControl({
    create: [
      Permission.ProjectOwner,
      Permission.ProjectAdmin,
      Permission.CreateProjectSSO,
    ],
    read: [
      Permission.ProjectOwner,
      Permission.ProjectAdmin,
      Permission.ProjectMember,
      Permission.ReadProjectSSO,
    ],
    update: [
      Permission.ProjectOwner,
      Permission.ProjectAdmin,
      Permission.EditProjectSSO,
    ],
  })
  @TableColumn({
    required: false,
    type: TableColumnType.EntityArray,
    modelType: Team,
    title: "Default Teams",
    description: "Default teams that new users will be added to via SCIM",
  })
  @ManyToMany(
    () => {
      return Team;
    },
    { eager: false },
  )
  @JoinTable({
    name: "ProjectScimTeam",
    inverseJoinColumn: {
      name: "teamId",
      referencedColumnName: "_id",
    },
    joinColumn: {
      name: "projectScimId",
      referencedColumnName: "_id",
    },
  })
  public teams?: Array<Team> = undefined;

  // When true (the default) users pushed by the IdP are created
  // automatically.
  @ColumnAccessControl({
    create: [
      Permission.ProjectOwner,
      Permission.ProjectAdmin,
      Permission.CreateProjectSSO,
    ],
    read: [
      Permission.ProjectOwner,
      Permission.ProjectAdmin,
      Permission.ProjectMember,
      Permission.ReadProjectSSO,
    ],
    update: [
      Permission.ProjectOwner,
      Permission.ProjectAdmin,
      Permission.EditProjectSSO,
    ],
  })
  @TableColumn({
    isDefaultValueColumn: true,
    type: TableColumnType.Boolean,
    title: "Auto Provision Users",
    description: "Automatically create users when they are added via SCIM",
    defaultValue: true,
  })
  @Column({
    type: ColumnType.Boolean,
    default: true,
  })
  public autoProvisionUsers?: boolean = undefined;

  // When true (the default) users removed/deactivated by the IdP are
  // removed automatically.
  @ColumnAccessControl({
    create: [
      Permission.ProjectOwner,
      Permission.ProjectAdmin,
      Permission.CreateProjectSSO,
    ],
    read: [
      Permission.ProjectOwner,
      Permission.ProjectAdmin,
      Permission.ProjectMember,
      Permission.ReadProjectSSO,
    ],
    update: [
      Permission.ProjectOwner,
      Permission.ProjectAdmin,
      Permission.EditProjectSSO,
    ],
  })
  @TableColumn({
    isDefaultValueColumn: true,
    type: TableColumnType.Boolean,
    title: "Auto Deprovision Users",
    description: "Automatically remove users when they are removed via SCIM",
    defaultValue: true,
  })
  @Column({
    type: ColumnType.Boolean,
    default: true,
  })
  public autoDeprovisionUsers?: boolean = undefined;

  // Audit relation: user who created this configuration. Kept (SET NULL)
  // when that user account is deleted.
  @ColumnAccessControl({
    create: [],
    read: [
      Permission.ProjectOwner,
      Permission.ProjectAdmin,
      Permission.ProjectMember,
      Permission.ReadProjectSSO,
    ],
    update: [],
  })
  @TableColumn({
    manyToOneRelationColumn: "createdByUserId",
    type: TableColumnType.Entity,
    modelType: User,
    title: "Created by User",
    description:
      "Relation to User who created this object (if this object was created by a User)",
  })
  @ManyToOne(
    () => {
      return User;
    },
    {
      eager: false,
      nullable: true,
      onDelete: "SET NULL",
      orphanedRowAction: "nullify",
    },
  )
  @JoinColumn({ name: "createdByUserId" })
  public createdByUser?: User = undefined;

  // Raw foreign key for the creator relation above.
  @ColumnAccessControl({
    create: [
      Permission.ProjectOwner,
      Permission.ProjectAdmin,
      Permission.CreateProjectSSO,
    ],
    read: [
      Permission.ProjectOwner,
      Permission.ProjectAdmin,
      Permission.ProjectMember,
      Permission.ReadProjectSSO,
    ],
    update: [],
  })
  @TableColumn({
    type: TableColumnType.ObjectID,
    title: "Created by User ID",
    description:
      "User ID who created this object (if this object was created by a User)",
  })
  @Column({
    type: ColumnType.ObjectID,
    nullable: true,
    transformer: ObjectID.getDatabaseTransformer(),
  })
  public createdByUserId?: ObjectID = undefined;

  // Audit relation: user who soft-deleted this configuration.
  @ColumnAccessControl({
    create: [],
    read: [
      Permission.ProjectOwner,
      Permission.ProjectAdmin,
      Permission.ProjectMember,
      Permission.ReadProjectSSO,
    ],
    update: [],
  })
  @TableColumn({
    manyToOneRelationColumn: "deletedByUserId",
    type: TableColumnType.Entity,
    modelType: User,
    title: "Deleted by User",
    description:
      "Relation to User who deleted this object (if this object was deleted by a User)",
  })
  @ManyToOne(
    () => {
      return User;
    },
    {
      cascade: false,
      eager: false,
      nullable: true,
      onDelete: "SET NULL",
      orphanedRowAction: "nullify",
    },
  )
  @JoinColumn({ name: "deletedByUserId" })
  public deletedByUser?: User = undefined;

  // Raw foreign key for the deleter relation above.
  @ColumnAccessControl({
    create: [],
    read: [
      Permission.ProjectOwner,
      Permission.ProjectAdmin,
      Permission.ProjectMember,
      Permission.ReadProjectSSO,
    ],
    update: [],
  })
  @TableColumn({
    type: TableColumnType.ObjectID,
    title: "Deleted by User ID",
    description:
      "User ID who deleted this object (if this object was deleted by a User)",
  })
  @Column({
    type: ColumnType.ObjectID,
    nullable: true,
    transformer: ObjectID.getDatabaseTransformer(),
  })
  public deletedByUserId?: ObjectID = undefined;
}

View File

@@ -0,0 +1,473 @@
import Project from "./Project";
import StatusPage from "./StatusPage";
import User from "./User";
import BaseModel from "./DatabaseBaseModel/DatabaseBaseModel";
import Route from "../../Types/API/Route";
import { PlanType } from "../../Types/Billing/SubscriptionPlan";
import ColumnAccessControl from "../../Types/Database/AccessControl/ColumnAccessControl";
import TableAccessControl from "../../Types/Database/AccessControl/TableAccessControl";
import TableBillingAccessControl from "../../Types/Database/AccessControl/TableBillingAccessControl";
import CanAccessIfCanReadOn from "../../Types/Database/CanAccessIfCanReadOn";
import ColumnLength from "../../Types/Database/ColumnLength";
import ColumnType from "../../Types/Database/ColumnType";
import CrudApiEndpoint from "../../Types/Database/CrudApiEndpoint";
import EnableDocumentation from "../../Types/Database/EnableDocumentation";
import TableColumn from "../../Types/Database/TableColumn";
import TableColumnType from "../../Types/Database/TableColumnType";
import TableMetadata from "../../Types/Database/TableMetadata";
import TenantColumn from "../../Types/Database/TenantColumn";
import UniqueColumnBy from "../../Types/Database/UniqueColumnBy";
import IconProp from "../../Types/Icon/IconProp";
import ObjectID from "../../Types/ObjectID";
import Permission from "../../Types/Permission";
import {
Column,
Entity,
Index,
JoinColumn,
ManyToOne,
} from "typeorm";
// StatusPageSCIM — one SCIM (System for Cross-domain Identity Management)
// provisioning configuration for a single status page. An identity provider
// authenticates SCIM calls with the stored bearer token and may then create
// or remove status page users, subject to the two auto-* toggles below.
@EnableDocumentation()
// All CRUD on this table is gated to the Scale plan.
@TableBillingAccessControl({
  create: PlanType.Scale,
  read: PlanType.Scale,
  update: PlanType.Scale,
  delete: PlanType.Scale,
})
// Row visibility follows read access on the parent status page.
@CanAccessIfCanReadOn("statusPage")
@TenantColumn("projectId")
// NOTE(review): table and column ACLs reuse the StatusPage *SSO* permissions
// (Create/Read/Edit/DeleteStatusPageSSO) rather than SCIM-specific ones —
// presumably intentional so SSO admins also manage SCIM; confirm.
@TableAccessControl({
  create: [
    Permission.ProjectOwner,
    Permission.ProjectAdmin,
    Permission.CreateStatusPageSSO,
  ],
  read: [
    Permission.ProjectOwner,
    Permission.ProjectAdmin,
    Permission.ProjectMember,
    Permission.ReadStatusPageSSO,
  ],
  delete: [
    Permission.ProjectOwner,
    Permission.ProjectAdmin,
    Permission.DeleteStatusPageSSO,
  ],
  update: [
    Permission.ProjectOwner,
    Permission.ProjectAdmin,
    Permission.EditStatusPageSSO,
  ],
})
@CrudApiEndpoint(new Route("/status-page-scim"))
@TableMetadata({
  tableName: "StatusPageSCIM",
  singularName: "Status Page SCIM",
  pluralName: "Status Page SCIM",
  icon: IconProp.Lock,
  tableDescription: "Manage SCIM auto-provisioning for your status page",
})
@Entity({
  name: "StatusPageSCIM",
})
export default class StatusPageSCIM extends BaseModel {
  // Relation to the owning (tenant) project. Rows are removed when the
  // project is deleted (onDelete: CASCADE).
  @ColumnAccessControl({
    create: [
      Permission.ProjectOwner,
      Permission.ProjectAdmin,
      Permission.CreateStatusPageSSO,
    ],
    read: [
      Permission.ProjectOwner,
      Permission.ProjectAdmin,
      Permission.ProjectMember,
      Permission.ReadStatusPageSSO,
    ],
    update: [],
  })
  @TableColumn({
    manyToOneRelationColumn: "projectId",
    type: TableColumnType.Entity,
    modelType: Project,
    title: "Project",
    description: "Relation to Project Resource in which this object belongs",
  })
  @ManyToOne(
    () => {
      return Project;
    },
    {
      eager: false,
      nullable: true,
      onDelete: "CASCADE",
      orphanedRowAction: "nullify",
    },
  )
  @JoinColumn({ name: "projectId" })
  public project?: Project = undefined;

  // FK column backing the project relation; indexed because it is the
  // tenant column and most queries filter on it.
  @ColumnAccessControl({
    create: [
      Permission.ProjectOwner,
      Permission.ProjectAdmin,
      Permission.CreateStatusPageSSO,
    ],
    read: [
      Permission.ProjectOwner,
      Permission.ProjectAdmin,
      Permission.ProjectMember,
      Permission.ReadStatusPageSSO,
    ],
    update: [],
  })
  @Index()
  @TableColumn({
    type: TableColumnType.ObjectID,
    required: true,
    canReadOnRelationQuery: true,
    title: "Project ID",
    description: "ID of your OneUptime Project in which this object belongs",
  })
  @Column({
    type: ColumnType.ObjectID,
    nullable: false,
    transformer: ObjectID.getDatabaseTransformer(),
  })
  public projectId?: ObjectID = undefined;

  // Relation to the status page this SCIM configuration provisions users
  // for. Deleting the status page cascades to this row.
  @ColumnAccessControl({
    create: [
      Permission.ProjectOwner,
      Permission.ProjectAdmin,
      Permission.CreateStatusPageSSO,
    ],
    read: [
      Permission.ProjectOwner,
      Permission.ProjectAdmin,
      Permission.ProjectMember,
      Permission.ReadStatusPageSSO,
    ],
    update: [],
  })
  @TableColumn({
    manyToOneRelationColumn: "statusPageId",
    type: TableColumnType.Entity,
    modelType: StatusPage,
    title: "Status Page",
    description:
      "Relation to Status Page Resource in which this object belongs",
  })
  @ManyToOne(
    () => {
      return StatusPage;
    },
    {
      eager: false,
      nullable: true,
      onDelete: "CASCADE",
      orphanedRowAction: "nullify",
    },
  )
  @JoinColumn({ name: "statusPageId" })
  public statusPage?: StatusPage = undefined;

  // FK column backing the status page relation; indexed for lookups by
  // status page.
  @ColumnAccessControl({
    create: [
      Permission.ProjectOwner,
      Permission.ProjectAdmin,
      Permission.CreateStatusPageSSO,
    ],
    read: [
      Permission.ProjectOwner,
      Permission.ProjectAdmin,
      Permission.ProjectMember,
      Permission.ReadStatusPageSSO,
    ],
    update: [],
  })
  @Index()
  @TableColumn({
    type: TableColumnType.ObjectID,
    required: true,
    title: "Status Page ID",
    description: "ID of your Status Page resource where this object belongs",
  })
  @Column({
    type: ColumnType.ObjectID,
    nullable: false,
    transformer: ObjectID.getDatabaseTransformer(),
  })
  public statusPageId?: ObjectID = undefined;

  // Human-friendly label. Must be unique per status page
  // (@UniqueColumnBy("statusPageId")), so one page can hold several
  // configurations with distinct names.
  @ColumnAccessControl({
    create: [
      Permission.ProjectOwner,
      Permission.ProjectAdmin,
      Permission.CreateStatusPageSSO,
    ],
    read: [
      Permission.ProjectOwner,
      Permission.ProjectAdmin,
      Permission.ProjectMember,
      Permission.ReadStatusPageSSO,
    ],
    update: [
      Permission.ProjectOwner,
      Permission.ProjectAdmin,
      Permission.EditStatusPageSSO,
    ],
  })
  @TableColumn({
    required: true,
    type: TableColumnType.ShortText,
    canReadOnRelationQuery: true,
    title: "Name",
    description: "Any friendly name for this SCIM configuration",
  })
  @Column({
    nullable: false,
    type: ColumnType.ShortText,
    length: ColumnLength.ShortText,
  })
  @UniqueColumnBy("statusPageId")
  public name?: string = undefined;

  // Optional free-form description shown in the dashboard.
  @ColumnAccessControl({
    create: [
      Permission.ProjectOwner,
      Permission.ProjectAdmin,
      Permission.CreateStatusPageSSO,
    ],
    read: [
      Permission.ProjectOwner,
      Permission.ProjectAdmin,
      Permission.ProjectMember,
      Permission.ReadStatusPageSSO,
    ],
    update: [
      Permission.ProjectOwner,
      Permission.ProjectAdmin,
      Permission.EditStatusPageSSO,
    ],
  })
  @TableColumn({
    required: false,
    type: TableColumnType.LongText,
    title: "Description",
    description: "Friendly description to help you remember",
  })
  @Column({
    nullable: true,
    type: ColumnType.LongText,
    length: ColumnLength.LongText,
  })
  public description?: string = undefined;

  // Secret the identity provider presents on every SCIM request. Note the
  // read ACL deliberately omits ProjectMember (unlike the other columns),
  // so ordinary members cannot read the token.
  @ColumnAccessControl({
    create: [
      Permission.ProjectOwner,
      Permission.ProjectAdmin,
      Permission.CreateStatusPageSSO,
    ],
    read: [
      Permission.ProjectOwner,
      Permission.ProjectAdmin,
      Permission.ReadStatusPageSSO,
    ],
    update: [
      Permission.ProjectOwner,
      Permission.ProjectAdmin,
      Permission.EditStatusPageSSO,
    ],
  })
  @TableColumn({
    required: true,
    type: TableColumnType.LongText,
    title: "Bearer Token",
    description: "Bearer token for SCIM authentication. Keep this secure.",
  })
  @Column({
    nullable: false,
    type: ColumnType.LongText,
    length: ColumnLength.LongText,
  })
  public bearerToken?: string = undefined;

  // When true (the default), SCIM "create user" requests add status page
  // users automatically.
  @ColumnAccessControl({
    create: [
      Permission.ProjectOwner,
      Permission.ProjectAdmin,
      Permission.CreateStatusPageSSO,
    ],
    read: [
      Permission.ProjectOwner,
      Permission.ProjectAdmin,
      Permission.ProjectMember,
      Permission.ReadStatusPageSSO,
    ],
    update: [
      Permission.ProjectOwner,
      Permission.ProjectAdmin,
      Permission.EditStatusPageSSO,
    ],
  })
  @TableColumn({
    isDefaultValueColumn: true,
    type: TableColumnType.Boolean,
    title: "Auto Provision Users",
    description: "Automatically create status page users when they are added via SCIM",
    defaultValue: true,
  })
  @Column({
    type: ColumnType.Boolean,
    default: true,
  })
  public autoProvisionUsers?: boolean = undefined;

  // When true (the default), SCIM "delete user" requests remove status page
  // users automatically.
  @ColumnAccessControl({
    create: [
      Permission.ProjectOwner,
      Permission.ProjectAdmin,
      Permission.CreateStatusPageSSO,
    ],
    read: [
      Permission.ProjectOwner,
      Permission.ProjectAdmin,
      Permission.ProjectMember,
      Permission.ReadStatusPageSSO,
    ],
    update: [
      Permission.ProjectOwner,
      Permission.ProjectAdmin,
      Permission.EditStatusPageSSO,
    ],
  })
  @TableColumn({
    isDefaultValueColumn: true,
    type: TableColumnType.Boolean,
    title: "Auto Deprovision Users",
    description: "Automatically remove status page users when they are removed via SCIM",
    defaultValue: true,
  })
  @Column({
    type: ColumnType.Boolean,
    default: true,
  })
  public autoDeprovisionUsers?: boolean = undefined;

  // Audit relation: user who created this row (server-populated; not
  // creatable or updatable through the API).
  @ColumnAccessControl({
    create: [],
    read: [
      Permission.ProjectOwner,
      Permission.ProjectAdmin,
      Permission.ProjectMember,
      Permission.ReadStatusPageSSO,
    ],
    update: [],
  })
  @TableColumn({
    manyToOneRelationColumn: "createdByUserId",
    type: TableColumnType.Entity,
    modelType: User,
    title: "Created by User",
    description:
      "Relation to User who created this object (if this object was created by a User)",
  })
  @ManyToOne(
    () => {
      return User;
    },
    {
      eager: false,
      nullable: true,
      onDelete: "SET NULL",
      orphanedRowAction: "nullify",
    },
  )
  @JoinColumn({ name: "createdByUserId" })
  public createdByUser?: User = undefined;

  // FK column backing createdByUser; nullable because the creating user may
  // be deleted later (onDelete: SET NULL).
  @ColumnAccessControl({
    create: [
      Permission.ProjectOwner,
      Permission.ProjectAdmin,
      Permission.CreateStatusPageSSO,
    ],
    read: [
      Permission.ProjectOwner,
      Permission.ProjectAdmin,
      Permission.ProjectMember,
      Permission.ReadStatusPageSSO,
    ],
    update: [],
  })
  @TableColumn({
    type: TableColumnType.ObjectID,
    title: "Created by User ID",
    description:
      "User ID who created this object (if this object was created by a User)",
  })
  @Column({
    type: ColumnType.ObjectID,
    nullable: true,
    transformer: ObjectID.getDatabaseTransformer(),
  })
  public createdByUserId?: ObjectID = undefined;

  // Audit relation: user who soft-deleted this row, if any.
  @ColumnAccessControl({
    create: [],
    read: [
      Permission.ProjectOwner,
      Permission.ProjectAdmin,
      Permission.ProjectMember,
      Permission.ReadStatusPageSSO,
    ],
    update: [],
  })
  @TableColumn({
    manyToOneRelationColumn: "deletedByUserId",
    type: TableColumnType.Entity,
    modelType: User,
    title: "Deleted by User",
    description:
      "Relation to User who deleted this object (if this object was deleted by a User)",
  })
  @ManyToOne(
    () => {
      return User;
    },
    {
      cascade: false,
      eager: false,
      nullable: true,
      onDelete: "SET NULL",
      orphanedRowAction: "nullify",
    },
  )
  @JoinColumn({ name: "deletedByUserId" })
  public deletedByUser?: User = undefined;

  // FK column backing deletedByUser.
  @ColumnAccessControl({
    create: [],
    read: [
      Permission.ProjectOwner,
      Permission.ProjectAdmin,
      Permission.ProjectMember,
      Permission.ReadStatusPageSSO,
    ],
    update: [],
  })
  @TableColumn({
    type: TableColumnType.ObjectID,
    title: "Deleted by User ID",
    description:
      "User ID who deleted this object (if this object was deleted by a User)",
  })
  @Column({
    type: ColumnType.ObjectID,
    nullable: true,
    transformer: ObjectID.getDatabaseTransformer(),
  })
  public deletedByUserId?: ObjectID = undefined;
}

View File

@@ -4,6 +4,7 @@ import {
DashboardRoute,
AppApiRoute,
StatusPageApiRoute,
DocsRoute,
} from "../ServiceRoute";
import BillingConfig from "./BillingConfig";
import Protocol from "../Types/API/Protocol";
@@ -150,6 +151,13 @@ export const AdminDashboardHostname: Hostname = Hostname.fromString(
}`,
);
export const DocsHostname: Hostname = Hostname.fromString(
`${process.env["SERVER_DOCS_HOSTNAME"] || "localhost"}:${
process.env["DOCS_PORT"] || 80
}`,
);
export const Env: string = process.env["NODE_ENV"] || "production";
// Redis does not require password.
@@ -318,6 +326,13 @@ export const AccountsClientUrl: URL = new URL(
AccountsRoute,
);
export const DocsClientUrl: URL = new URL(
HttpProtocol,
Host,
DocsRoute
);
export const DisableTelemetry: boolean =
process.env["DISABLE_TELEMETRY"] === "true";

View File

@@ -0,0 +1,67 @@
import { MigrationInterface, QueryRunner } from "typeorm";
export class MigrationName1754304193228 implements MigrationInterface {
  public name = "MigrationName1754304193228";

  /**
   * Creates the ProjectSCIM configuration table and the ProjectScimTeam
   * join table (SCIM config <-> default teams), their indexes, and the
   * foreign keys to Project, User and Team. Statements run strictly in
   * order so tables exist before the indexes and constraints that
   * reference them.
   */
  public async up(queryRunner: QueryRunner): Promise<void> {
    const upStatements: ReadonlyArray<string> = [
      `CREATE TABLE "ProjectSCIM" ("_id" uuid NOT NULL DEFAULT uuid_generate_v4(), "createdAt" TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT now(), "updatedAt" TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT now(), "deletedAt" TIMESTAMP WITH TIME ZONE, "version" integer NOT NULL, "projectId" uuid NOT NULL, "name" character varying(100) NOT NULL, "description" character varying(500), "bearerToken" character varying(500) NOT NULL, "autoProvisionUsers" boolean NOT NULL DEFAULT true, "autoDeprovisionUsers" boolean NOT NULL DEFAULT true, "isEnabled" boolean NOT NULL DEFAULT false, "createdByUserId" uuid, "deletedByUserId" uuid, CONSTRAINT "PK_51e71d70211675a5c918aee4e68" PRIMARY KEY ("_id"))`,
      `CREATE INDEX "IDX_f916360335859c26c4d7051239" ON "ProjectSCIM" ("projectId") `,
      `CREATE TABLE "ProjectScimTeam" ("projectScimId" uuid NOT NULL, "teamId" uuid NOT NULL, CONSTRAINT "PK_db724b66b4fa8c880ce5ccf820b" PRIMARY KEY ("projectScimId", "teamId"))`,
      `CREATE INDEX "IDX_b9a28efd66600267f0e9de0731" ON "ProjectScimTeam" ("projectScimId") `,
      `CREATE INDEX "IDX_bb0eda2ef0c773f975e9ad8448" ON "ProjectScimTeam" ("teamId") `,
      `ALTER TABLE "ProjectSCIM" ADD CONSTRAINT "FK_f916360335859c26c4d7051239b" FOREIGN KEY ("projectId") REFERENCES "Project"("_id") ON DELETE CASCADE ON UPDATE NO ACTION`,
      `ALTER TABLE "ProjectSCIM" ADD CONSTRAINT "FK_5d5d587984f156e5215d51daff7" FOREIGN KEY ("createdByUserId") REFERENCES "User"("_id") ON DELETE SET NULL ON UPDATE NO ACTION`,
      `ALTER TABLE "ProjectSCIM" ADD CONSTRAINT "FK_9cadda4fc2af268b5670d02bf76" FOREIGN KEY ("deletedByUserId") REFERENCES "User"("_id") ON DELETE SET NULL ON UPDATE NO ACTION`,
      `ALTER TABLE "ProjectScimTeam" ADD CONSTRAINT "FK_b9a28efd66600267f0e9de0731b" FOREIGN KEY ("projectScimId") REFERENCES "ProjectSCIM"("_id") ON DELETE CASCADE ON UPDATE CASCADE`,
      `ALTER TABLE "ProjectScimTeam" ADD CONSTRAINT "FK_bb0eda2ef0c773f975e9ad8448a" FOREIGN KEY ("teamId") REFERENCES "Team"("_id") ON DELETE CASCADE ON UPDATE CASCADE`,
    ];

    for (const statement of upStatements) {
      await queryRunner.query(statement);
    }
  }

  /**
   * Reverses up() exactly: drops constraints first, then indexes, then the
   * tables (join table before ProjectSCIM).
   */
  public async down(queryRunner: QueryRunner): Promise<void> {
    const downStatements: ReadonlyArray<string> = [
      `ALTER TABLE "ProjectScimTeam" DROP CONSTRAINT "FK_bb0eda2ef0c773f975e9ad8448a"`,
      `ALTER TABLE "ProjectScimTeam" DROP CONSTRAINT "FK_b9a28efd66600267f0e9de0731b"`,
      `ALTER TABLE "ProjectSCIM" DROP CONSTRAINT "FK_9cadda4fc2af268b5670d02bf76"`,
      `ALTER TABLE "ProjectSCIM" DROP CONSTRAINT "FK_5d5d587984f156e5215d51daff7"`,
      `ALTER TABLE "ProjectSCIM" DROP CONSTRAINT "FK_f916360335859c26c4d7051239b"`,
      `DROP INDEX "public"."IDX_bb0eda2ef0c773f975e9ad8448"`,
      `DROP INDEX "public"."IDX_b9a28efd66600267f0e9de0731"`,
      `DROP TABLE "ProjectScimTeam"`,
      `DROP INDEX "public"."IDX_f916360335859c26c4d7051239"`,
      `DROP TABLE "ProjectSCIM"`,
    ];

    for (const statement of downStatements) {
      await queryRunner.query(statement);
    }
  }
}

View File

@@ -0,0 +1,17 @@
import { MigrationInterface, QueryRunner } from "typeorm";
export class MigrationName1754315774827 implements MigrationInterface {
  public name = "MigrationName1754315774827";

  /** Drops the unused "isEnabled" flag from the ProjectSCIM table. */
  public async up(queryRunner: QueryRunner): Promise<void> {
    const dropColumnSql: string = `ALTER TABLE "ProjectSCIM" DROP COLUMN "isEnabled"`;
    await queryRunner.query(dropColumnSql);
  }

  /** Restores the column with its original NOT NULL DEFAULT false shape. */
  public async down(queryRunner: QueryRunner): Promise<void> {
    const addColumnSql: string = `ALTER TABLE "ProjectSCIM" ADD "isEnabled" boolean NOT NULL DEFAULT false`;
    await queryRunner.query(addColumnSql);
  }
}

View File

@@ -0,0 +1,30 @@
import { MigrationInterface, QueryRunner } from "typeorm";
export class MigrationName1754384418632 implements MigrationInterface {
  // Double-quoted and semicolon-terminated for consistency with the other
  // migrations in this directory (the generator emitted single quotes here).
  public name = "MigrationName1754384418632";

  /**
   * Creates the StatusPageSCIM table (per-status-page SCIM provisioning
   * configuration) with indexes on projectId and statusPageId and foreign
   * keys to Project, StatusPage and User (audit columns).
   *
   * The two OnCallDutyPolicyScheduleLayer DEFAULT rewrites are unrelated to
   * SCIM — the migration generator re-normalizes those JSON defaults on
   * every run; they are kept so up()/down() round-trip cleanly.
   */
  public async up(queryRunner: QueryRunner): Promise<void> {
    await queryRunner.query(
      `CREATE TABLE "StatusPageSCIM" ("_id" uuid NOT NULL DEFAULT uuid_generate_v4(), "createdAt" TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT now(), "updatedAt" TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT now(), "deletedAt" TIMESTAMP WITH TIME ZONE, "version" integer NOT NULL, "projectId" uuid NOT NULL, "statusPageId" uuid NOT NULL, "name" character varying(100) NOT NULL, "description" character varying(500), "bearerToken" character varying(500) NOT NULL, "autoProvisionUsers" boolean NOT NULL DEFAULT true, "autoDeprovisionUsers" boolean NOT NULL DEFAULT true, "createdByUserId" uuid, "deletedByUserId" uuid, CONSTRAINT "PK_9d65d486be515b9608347cf66d4" PRIMARY KEY ("_id"))`,
    );
    await queryRunner.query(
      `CREATE INDEX "IDX_0a241118fe6b4a8665deef444b" ON "StatusPageSCIM" ("projectId") `,
    );
    await queryRunner.query(
      `CREATE INDEX "IDX_7200e368657773fde2836c57eb" ON "StatusPageSCIM" ("statusPageId") `,
    );
    await queryRunner.query(
      `ALTER TABLE "OnCallDutyPolicyScheduleLayer" ALTER COLUMN "rotation" SET DEFAULT '{"_type":"Recurring","value":{"intervalType":"Day","intervalCount":{"_type":"PositiveNumber","value":1}}}'`,
    );
    await queryRunner.query(
      `ALTER TABLE "OnCallDutyPolicyScheduleLayer" ALTER COLUMN "restrictionTimes" SET DEFAULT '{"_type":"RestrictionTimes","value":{"restictionType":"None","dayRestrictionTimes":null,"weeklyRestrictionTimes":[]}}'`,
    );
    await queryRunner.query(
      `ALTER TABLE "StatusPageSCIM" ADD CONSTRAINT "FK_0a241118fe6b4a8665deef444b2" FOREIGN KEY ("projectId") REFERENCES "Project"("_id") ON DELETE CASCADE ON UPDATE NO ACTION`,
    );
    await queryRunner.query(
      `ALTER TABLE "StatusPageSCIM" ADD CONSTRAINT "FK_7200e368657773fde2836c57ebe" FOREIGN KEY ("statusPageId") REFERENCES "StatusPage"("_id") ON DELETE CASCADE ON UPDATE NO ACTION`,
    );
    await queryRunner.query(
      `ALTER TABLE "StatusPageSCIM" ADD CONSTRAINT "FK_adb05dd1cbe0e734a76b3dbdcf1" FOREIGN KEY ("createdByUserId") REFERENCES "User"("_id") ON DELETE SET NULL ON UPDATE NO ACTION`,
    );
    await queryRunner.query(
      `ALTER TABLE "StatusPageSCIM" ADD CONSTRAINT "FK_2fded7c784a5c2f56ad2553cb80" FOREIGN KEY ("deletedByUserId") REFERENCES "User"("_id") ON DELETE SET NULL ON UPDATE NO ACTION`,
    );
  }

  /**
   * Reverses up(): drops the StatusPageSCIM foreign keys, restores the
   * previous (whitespace-padded) JSON defaults, then drops indexes and
   * finally the table.
   */
  public async down(queryRunner: QueryRunner): Promise<void> {
    await queryRunner.query(
      `ALTER TABLE "StatusPageSCIM" DROP CONSTRAINT "FK_2fded7c784a5c2f56ad2553cb80"`,
    );
    await queryRunner.query(
      `ALTER TABLE "StatusPageSCIM" DROP CONSTRAINT "FK_adb05dd1cbe0e734a76b3dbdcf1"`,
    );
    await queryRunner.query(
      `ALTER TABLE "StatusPageSCIM" DROP CONSTRAINT "FK_7200e368657773fde2836c57ebe"`,
    );
    await queryRunner.query(
      `ALTER TABLE "StatusPageSCIM" DROP CONSTRAINT "FK_0a241118fe6b4a8665deef444b2"`,
    );
    await queryRunner.query(
      `ALTER TABLE "OnCallDutyPolicyScheduleLayer" ALTER COLUMN "restrictionTimes" SET DEFAULT '{"_type": "RestrictionTimes", "value": {"restictionType": "None", "dayRestrictionTimes": null, "weeklyRestrictionTimes": []}}'`,
    );
    await queryRunner.query(
      `ALTER TABLE "OnCallDutyPolicyScheduleLayer" ALTER COLUMN "rotation" SET DEFAULT '{"_type": "Recurring", "value": {"intervalType": "Day", "intervalCount": {"_type": "PositiveNumber", "value": 1}}}'`,
    );
    await queryRunner.query(
      `DROP INDEX "public"."IDX_7200e368657773fde2836c57eb"`,
    );
    await queryRunner.query(
      `DROP INDEX "public"."IDX_0a241118fe6b4a8665deef444b"`,
    );
    await queryRunner.query(`DROP TABLE "StatusPageSCIM"`);
  }
}

View File

@@ -146,6 +146,9 @@ import { MigrationName1753343522987 } from "./1753343522987-MigrationName";
import { MigrationName1753377161288 } from "./1753377161288-MigrationName";
import { AddPerformanceIndexes1753378524062 } from "./1753378524062-AddPerformanceIndexes";
import { MigrationName1753383711511 } from "./1753383711511-MigrationName";
import { MigrationName1754304193228 } from "./1754304193228-MigrationName";
import { MigrationName1754315774827 } from "./1754315774827-MigrationName";
import { MigrationName1754384418632 } from "./1754384418632-MigrationName";
export default [
InitialMigration,
@@ -296,4 +299,7 @@ export default [
MigrationName1753377161288,
AddPerformanceIndexes1753378524062,
MigrationName1753383711511,
MigrationName1754304193228,
MigrationName1754315774827,
MigrationName1754384418632
];

View File

@@ -17,6 +17,10 @@ export enum QueueName {
Workflow = "Workflow",
Worker = "Worker",
Telemetry = "Telemetry",
FluentIngest = "FluentIngest",
IncomingRequestIngest = "IncomingRequestIngest",
ServerMonitorIngest = "ServerMonitorIngest",
ProbeIngest = "ProbeIngest",
}
export type QueueJob = Job;
@@ -189,6 +193,7 @@ export default class Queue {
name: string;
data: JSONObject;
failedReason: string;
stackTrace?: string;
processedOn: Date | null;
finishedOn: Date | null;
attemptsMade: number;
@@ -200,7 +205,16 @@ export default class Queue {
const failed: Job[] = await queue.getFailed(start, end);
return failed.map((job: Job) => {
return {
const result: {
id: string;
name: string;
data: JSONObject;
failedReason: string;
stackTrace?: string;
processedOn: Date | null;
finishedOn: Date | null;
attemptsMade: number;
} = {
id: job.id || "unknown",
name: job.name || "unknown",
data: job.data as JSONObject,
@@ -209,6 +223,12 @@ export default class Queue {
finishedOn: job.finishedOn ? new Date(job.finishedOn) : null,
attemptsMade: job.attemptsMade || 0,
};
if (job.stacktrace && job.stacktrace.length > 0) {
result.stackTrace = job.stacktrace.join("\n");
}
return result;
});
}
}

View File

@@ -38,6 +38,9 @@ export default class ClusterKeyAuthorization {
} else if (req.headers && req.headers["clusterkey"]) {
// Header keys are automatically transformed to lowercase
clusterKey = req.headers["clusterkey"] as string;
} else if (req.headers && req.headers["x-clusterkey"]) {
// KEDA TriggerAuthentication sends headers with X- prefix
clusterKey = req.headers["x-clusterkey"] as string;
} else if (req.body && req.body.clusterKey) {
clusterKey = req.body.clusterKey;
} else {

View File

@@ -0,0 +1,129 @@
import ProjectSCIMService from "../Services/ProjectSCIMService";
import StatusPageSCIMService from "../Services/StatusPageSCIMService";
import {
ExpressRequest,
ExpressResponse,
NextFunction,
OneUptimeRequest,
} from "../Utils/Express";
import ObjectID from "../../Types/ObjectID";
import ProjectSCIM from "../../Models/DatabaseModels/ProjectSCIM";
import StatusPageSCIM from "../../Models/DatabaseModels/StatusPageSCIM";
import NotAuthorizedException from "../../Types/Exception/NotAuthorizedException";
import BadRequestException from "../../Types/Exception/BadRequestException";
import CaptureSpan from "../Utils/Telemetry/CaptureSpan";
import logger from "../Utils/Logger";
export default class SCIMMiddleware {
  // Pulls the token out of an "Authorization: Bearer <token>" header.
  // Returns undefined when the header is absent or uses a different scheme.
  private static getBearerTokenFromRequest(
    req: ExpressRequest,
  ): string | undefined {
    const authorizationHeader: string | undefined = req.headers?.[
      "authorization"
    ] as string | undefined;

    if (authorizationHeader && authorizationHeader.startsWith("Bearer ")) {
      return authorizationHeader.substring("Bearer ".length);
    }

    return undefined;
  }

  /**
   * Express middleware that authenticates an inbound SCIM request.
   *
   * The route carries either a project-level (:projectScimId) or a
   * status-page-level (:statusPageScimId) SCIM configuration id. The id is
   * looked up together with the presented bearer token — first against
   * ProjectSCIM, then against StatusPageSCIM — and the matching
   * configuration is attached to the request as `bearerTokenData` (tagged
   * "project-scim" or "status-page-scim") for downstream handlers.
   *
   * Errors (missing id, missing/invalid token) are forwarded to next().
   */
  @CaptureSpan()
  public static async isAuthorizedSCIMRequest(
    req: ExpressRequest,
    _res: ExpressResponse,
    next: NextFunction,
  ): Promise<void> {
    try {
      const oneuptimeRequest: OneUptimeRequest = req as OneUptimeRequest;

      const scimId: string | undefined =
        req.params["projectScimId"] || req.params["statusPageScimId"];

      if (!scimId) {
        throw new BadRequestException("SCIM ID is required");
      }

      const bearerToken: string | undefined =
        SCIMMiddleware.getBearerTokenFromRequest(req);

      // Never log the token itself — only whether one was presented.
      logger.debug(
        `SCIM Authorization: scimId=${scimId}, bearerToken=${bearerToken ? "***" : "missing"}`,
      );

      if (!bearerToken) {
        throw new NotAuthorizedException(
          "Bearer token is required for SCIM authentication",
        );
      }

      const scimObjectId: ObjectID = new ObjectID(scimId);

      // Project-level SCIM configurations are checked first.
      const projectConfig: ProjectSCIM | null =
        await ProjectSCIMService.findOneBy({
          query: {
            _id: scimObjectId,
            bearerToken: bearerToken,
          },
          select: {
            _id: true,
            projectId: true,
            autoProvisionUsers: true,
            autoDeprovisionUsers: true,
            teams: {
              _id: true,
              name: true,
            },
          },
          props: {
            isRoot: true,
          },
        });

      if (projectConfig) {
        oneuptimeRequest.bearerTokenData = {
          scimConfig: projectConfig,
          projectId: projectConfig.projectId,
          projectScimId: scimObjectId,
          type: "project-scim",
        };
        return next();
      }

      // Fall back to status-page-level SCIM configurations.
      const statusPageConfig: StatusPageSCIM | null =
        await StatusPageSCIMService.findOneBy({
          query: {
            _id: scimObjectId,
            bearerToken: bearerToken,
          },
          select: {
            _id: true,
            projectId: true,
            statusPageId: true,
            autoProvisionUsers: true,
            autoDeprovisionUsers: true,
          },
          props: {
            isRoot: true,
          },
        });

      if (statusPageConfig) {
        oneuptimeRequest.bearerTokenData = {
          scimConfig: statusPageConfig,
          projectId: statusPageConfig.projectId,
          statusPageId: statusPageConfig.statusPageId,
          statusPageScimId: scimObjectId,
          type: "status-page-scim",
        };
        return next();
      }

      // Neither table matched this (id, token) pair.
      throw new NotAuthorizedException(
        "Invalid bearer token or SCIM configuration not found",
      );
    } catch (err) {
      return next(err);
    }
  }
}

View File

@@ -0,0 +1,27 @@
import CreateBy from "../Types/Database/CreateBy";
import { OnCreate } from "../Types/Database/Hooks";
import DatabaseService from "./DatabaseService";
import Model from "../../Models/DatabaseModels/ProjectSCIM";
import ObjectID from "../../Types/ObjectID";
export class Service extends DatabaseService<Model> {
  public constructor() {
    super(Model);
  }

  /**
   * Runs before every ProjectSCIM insert. Identity providers authenticate
   * each SCIM call with the stored bearer token, so a configuration saved
   * without one gets a randomly generated token filled in server-side.
   */
  protected override async onBeforeCreate(
    createBy: CreateBy<Model>,
  ): Promise<OnCreate<Model>> {
    const tokenMissing: boolean = !createBy.data.bearerToken;

    if (tokenMissing) {
      createBy.data.bearerToken = ObjectID.generate().toString();
    }

    return {
      createBy,
      carryForward: {},
    };
  }
}

export default new Service();

View File

@@ -0,0 +1,27 @@
import CreateBy from "../Types/Database/CreateBy";
import { OnCreate } from "../Types/Database/Hooks";
import DatabaseService from "./DatabaseService";
import Model from "../../Models/DatabaseModels/StatusPageSCIM";
import ObjectID from "../../Types/ObjectID";
export class Service extends DatabaseService<Model> {
public constructor() {
super(Model);
}
protected override async onBeforeCreate(
createBy: CreateBy<Model>,
): Promise<OnCreate<Model>> {
if (!createBy.data.bearerToken) {
// Generate a secure bearer token if not provided
createBy.data.bearerToken = ObjectID.generate().toString();
}
return {
createBy: createBy,
carryForward: {},
};
}
}
export default new Service();

View File

@@ -137,7 +137,7 @@ export default class IncomingRequestCriteria {
input.dataToProcess.monitorId.toString() +
" is true",
);
return `Incoming request / heartbeat received in ${value} minutes.`;
return `Incoming request / heartbeat received in ${value} minutes. It was received ${differenceInMinutes} minutes ago.`;
}
return null;
}
@@ -153,7 +153,7 @@ export default class IncomingRequestCriteria {
input.dataToProcess.monitorId.toString() +
" is true",
);
return `Incoming request / heartbeat not received in ${value} minutes.`;
return `Incoming request / heartbeat not received in ${value} minutes. It was received ${differenceInMinutes} minutes ago.`;
}
return null;
}

View File

@@ -228,6 +228,8 @@ export default class MonitorResourceUtil {
await MonitorService.updateOneById({
id: monitor.id!,
data: {
incomingRequestMonitorHeartbeatCheckedAt:
OneUptimeDate.getCurrentDate(),
incomingMonitorRequest: {
...dataToProcess,
} as any,

View File

@@ -89,6 +89,16 @@ app.set("view engine", "ejs");
* https://stackoverflow.com/questions/19917401/error-request-entity-too-large
*/
// SCIM content-type shim — must be registered before the JSON body parser.
// SCIM clients (RFC 7644) send "application/scim+json", which express.json()
// would not parse; rewrite the header to plain "application/json" first.
app.use((req: ExpressRequest, _res: ExpressResponse, next: NextFunction) => {
  const requestContentType: string | undefined = req.headers["content-type"];

  if (requestContentType?.includes("application/scim+json")) {
    req.headers["content-type"] = "application/json";
  }

  next();
});
app.use((req: OneUptimeRequest, res: ExpressResponse, next: NextFunction) => {
if (req.headers["content-encoding"] === "gzip") {
const buffers: any = [];

View File

@@ -0,0 +1,402 @@
import ProjectUtil from "Common/UI/Utils/Project";
import PageComponentProps from "../PageComponentProps";
import Banner from "Common/UI/Components/Banner/Banner";
import { ButtonStyleType } from "Common/UI/Components/Button/Button";
import FormFieldSchemaType from "Common/UI/Components/Forms/Types/FormFieldSchemaType";
import ConfirmModal from "Common/UI/Components/Modal/ConfirmModal";
import ModelTable from "Common/UI/Components/ModelTable/ModelTable";
import FieldType from "Common/UI/Components/Types/FieldType";
import HiddenText from "Common/UI/Components/HiddenText/HiddenText";
import ModelAPI from "Common/UI/Utils/ModelAPI/ModelAPI";
import API from "Common/UI/Utils/API/API";
import { IDENTITY_URL } from "Common/UI/Config";
import Navigation from "Common/UI/Utils/Navigation";
import ProjectSCIM from "Common/Models/DatabaseModels/ProjectSCIM";
import Team from "Common/Models/DatabaseModels/Team";
import ObjectID from "Common/Types/ObjectID";
import React, {
Fragment,
FunctionComponent,
ReactElement,
useState,
} from "react";
import IconProp from "Common/Types/Icon/IconProp";
import Route from "Common/Types/API/Route";
/**
 * Project Settings > SCIM page.
 *
 * Lets project admins manage ProjectSCIM configurations via a ModelTable
 * (create/edit/delete), view the SCIM endpoint URLs + bearer token for a
 * configuration, and reset a configuration's bearer token. The reset flow
 * generates the new token client-side and persists it with ModelAPI.
 */
const SCIMPage: FunctionComponent<PageComponentProps> = (
  _props: PageComponentProps,
): ReactElement => {
  // Id of the config whose SCIM URLs modal is open ("" = closed), plus the
  // full row so the modal can show its bearer token.
  const [showSCIMUrlId, setShowSCIMUrlId] = useState<string>("");
  const [currentSCIMConfig, setCurrentSCIMConfig] =
    useState<ProjectSCIM | null>(null);
  // Toggled after a token reset; used as the ModelTable key to force a
  // remount/refetch.
  const [refresher, setRefresher] = useState<boolean>(false);
  // State for the reset-bearer-token flow: target config id, the three
  // modals (confirm / error / success), loading flag, and results.
  const [resetSCIMId, setResetSCIMId] = useState<string>("");
  const [showResetModal, setShowResetModal] = useState<boolean>(false);
  const [isResetLoading, setIsResetLoading] = useState<boolean>(false);
  const [resetError, setResetError] = useState<string>("");
  const [showResetErrorModal, setShowResetErrorModal] =
    useState<boolean>(false);
  const [showResetSuccessModal, setShowResetSuccessModal] =
    useState<boolean>(false);
  const [newBearerToken, setNewBearerToken] = useState<string>("");

  // Generates a fresh token, saves it on the selected config, then shows
  // either the success modal (with the new token) or the error modal.
  // setIsResetLoading(false) after the try/catch acts as a finally.
  const resetBearerToken: () => Promise<void> = async (): Promise<void> => {
    setIsResetLoading(true);
    try {
      const newToken: ObjectID = ObjectID.generate();
      await ModelAPI.updateById<ProjectSCIM>({
        modelType: ProjectSCIM,
        id: new ObjectID(resetSCIMId),
        data: {
          bearerToken: newToken.toString(),
        },
      });
      setNewBearerToken(newToken.toString());
      setShowResetModal(false);
      setShowResetSuccessModal(true);
      setRefresher(!refresher);
    } catch (err) {
      setResetError(API.getFriendlyMessage(err));
      setShowResetErrorModal(true);
      setShowResetModal(false);
    }
    setIsResetLoading(false);
  };

  // Layout: docs banner, the SCIM config table, then the three conditional
  // modals (SCIM URLs, reset confirm, reset error, reset success).
  return (
    <Fragment>
      <>
        <Banner
          openInNewTab={true}
          title="Need help with configuring SCIM?"
          description="Learn more about SCIM (System for Cross-domain Identity Management) setup and configuration"
          link={Route.fromString("/docs/identity/scim")}
          hideOnMobile={true}
        />
        <ModelTable<ProjectSCIM>
          key={refresher.toString()}
          modelType={ProjectSCIM}
          userPreferencesKey={"project-scim-table"}
          query={{
            projectId: ProjectUtil.getCurrentProjectId()!,
          }}
          id="scim-table"
          name="Settings > Project SCIM"
          isDeleteable={true}
          isEditable={true}
          isCreateable={true}
          cardProps={{
            title: "SCIM (System for Cross-domain Identity Management)",
            description:
              "SCIM is an open standard for automating the exchange of user identity information between identity domains, or IT systems. Use SCIM to automatically provision and deprovision users from your identity provider.",
          }}
          formSteps={[
            {
              title: "Basic Info",
              id: "basic",
            },
            {
              title: "Configuration",
              id: "configuration",
            },
            {
              title: "Teams",
              id: "teams",
            },
          ]}
          noItemsMessage={"No SCIM configuration found."}
          viewPageRoute={Navigation.getCurrentRoute()}
          formFields={[
            {
              field: {
                name: true,
              },
              title: "Name",
              fieldType: FormFieldSchemaType.Text,
              required: true,
              description:
                "Friendly name to help you remember this SCIM configuration.",
              placeholder: "Okta SCIM",
              validation: {
                minLength: 2,
              },
              stepId: "basic",
            },
            {
              field: {
                description: true,
              },
              title: "Description",
              fieldType: FormFieldSchemaType.LongText,
              required: false,
              description: "Optional description for this SCIM configuration.",
              placeholder:
                "SCIM configuration for automatic user provisioning from Okta",
              stepId: "basic",
            },
            {
              field: {
                autoProvisionUsers: true,
              },
              title: "Auto Provision Users",
              fieldType: FormFieldSchemaType.Checkbox,
              required: false,
              description:
                "Automatically create users when they are added in your identity provider.",
              stepId: "configuration",
            },
            {
              field: {
                autoDeprovisionUsers: true,
              },
              title: "Auto Deprovision Users",
              fieldType: FormFieldSchemaType.Checkbox,
              required: false,
              description:
                "Automatically remove users from teams when they are removed from your identity provider.",
              stepId: "configuration",
            },
            {
              field: {
                teams: true,
              },
              title: "Default Teams",
              fieldType: FormFieldSchemaType.MultiSelectDropdown,
              dropdownModal: {
                type: Team,
                labelField: "name",
                valueField: "_id",
              },
              required: false,
              description:
                "New users will be automatically added to these teams.",
              stepId: "teams",
            },
          ]}
          columns={[
            {
              field: {
                name: true,
              },
              title: "Name",
              type: FieldType.Text,
            },
            {
              field: {
                autoProvisionUsers: true,
              },
              title: "Auto Provision",
              type: FieldType.Boolean,
            },
            {
              field: {
                autoDeprovisionUsers: true,
              },
              title: "Auto Deprovision",
              type: FieldType.Boolean,
            },
          ]}
          selectMoreFields={{
            bearerToken: true,
            createdAt: true,
            updatedAt: true,
            teams: {
              name: true,
              _id: true,
            },
          }}
          filters={[
            {
              field: {
                name: true,
              },
              title: "Name",
              type: FieldType.Text,
            },
          ]}
          actionButtons={[
            {
              title: "View SCIM URLs",
              buttonStyleType: ButtonStyleType.NORMAL,
              onClick: async (
                item: ProjectSCIM,
                onCompleteAction: () => void,
                _onError: (error: Error) => void,
              ) => {
                onCompleteAction();
                setCurrentSCIMConfig(item);
                setShowSCIMUrlId(item.id?.toString() || "");
              },
            },
            {
              title: "Reset Bearer Token",
              buttonStyleType: ButtonStyleType.OUTLINE,
              icon: IconProp.Refresh,
              onClick: async (
                item: ProjectSCIM,
                onCompleteAction: () => void,
                _onError: (error: Error) => void,
              ) => {
                onCompleteAction();
                setResetSCIMId(item.id?.toString() || "");
                setShowResetModal(true);
              },
            },
          ]}
        />
        {showSCIMUrlId && currentSCIMConfig && (
          <ConfirmModal
            title={`SCIM Configuration URLs`}
            description={
              <div>
                <p className="text-gray-500 mb-4">
                  Use these URLs to configure SCIM in your identity provider:
                </p>
                <div className="space-y-4">
                  <div>
                    <p className="font-medium text-gray-700 mb-1">
                      SCIM Base URL:
                    </p>
                    <code className="block p-2 bg-gray-100 rounded text-sm break-all">
                      {IDENTITY_URL.toString()}/scim/v2/{showSCIMUrlId}
                    </code>
                    <p className="text-xs text-gray-500 mt-1">
                      Use this as the SCIM endpoint URL in your identity
                      provider
                    </p>
                  </div>
                  <div>
                    <p className="font-medium text-gray-700 mb-1">
                      Service Provider Config URL:
                    </p>
                    <code className="block p-2 bg-gray-100 rounded text-sm break-all">
                      {IDENTITY_URL.toString()}/scim/v2/{showSCIMUrlId}
                      /ServiceProviderConfig
                    </code>
                  </div>
                  <div>
                    <p className="font-medium text-gray-700 mb-1">
                      Users Endpoint:
                    </p>
                    <code className="block p-2 bg-gray-100 rounded text-sm break-all">
                      {IDENTITY_URL.toString()}/scim/v2/{showSCIMUrlId}/Users
                    </code>
                  </div>
                  <div>
                    <p className="font-medium text-gray-700 mb-1">
                      Groups Endpoint:
                    </p>
                    <code className="block p-2 bg-gray-100 rounded text-sm break-all">
                      {IDENTITY_URL.toString()}/scim/v2/{showSCIMUrlId}/Groups
                    </code>
                  </div>
                  <div>
                    <p className="font-medium text-gray-700 mb-1">
                      Unique identifier field for users:
                    </p>
                    <code className="block p-2 bg-gray-100 rounded text-sm break-all">
                      userName
                    </code>
                    <p className="text-xs text-gray-500 mt-1">
                      Use this field as the unique identifier for users in your
                      identity provider SCIM configuration
                    </p>
                  </div>
                  <div className="border-t pt-4">
                    <p className="font-medium text-gray-700 mb-1">
                      Bearer Token:
                    </p>
                    <div className="mb-2">
                      <HiddenText
                        text={currentSCIMConfig.bearerToken || ""}
                        isCopyable={true}
                      />
                    </div>
                    <p className="text-xs text-gray-500">
                      Use this bearer token for authentication in your identity
                      provider SCIM configuration.
                    </p>
                  </div>
                </div>
              </div>
            }
            submitButtonText={"Close"}
            onSubmit={() => {
              setShowSCIMUrlId("");
              setCurrentSCIMConfig(null);
            }}
            submitButtonType={ButtonStyleType.NORMAL}
          />
        )}
        {/* Reset Bearer Token Modals */}
        {showResetModal && (
          <ConfirmModal
            title="Reset Bearer Token"
            description="Are you sure you want to reset the Bearer Token? You will need to update your identity provider with the new token."
            onSubmit={async () => {
              await resetBearerToken();
            }}
            isLoading={isResetLoading}
            onClose={() => {
              setShowResetModal(false);
              setResetSCIMId("");
            }}
            submitButtonText="Reset"
            submitButtonType={ButtonStyleType.DANGER}
          />
        )}
        {showResetErrorModal && (
          <ConfirmModal
            title="Reset Error"
            description={resetError}
            onSubmit={() => {
              setShowResetErrorModal(false);
              setResetError("");
              setResetSCIMId("");
            }}
            submitButtonText="Close"
            submitButtonType={ButtonStyleType.NORMAL}
          />
        )}
        {showResetSuccessModal && (
          <ConfirmModal
            title="New Bearer Token"
            description={
              <div>
                <p className="mb-3">
                  Your new Bearer Token has been generated:
                </p>
                <div className="mb-2">
                  <HiddenText text={newBearerToken} isCopyable={true} />
                </div>
                <p className="text-sm text-gray-500">
                  Please update your identity provider with this new token.
                </p>
              </div>
            }
            onSubmit={() => {
              setShowResetSuccessModal(false);
              setNewBearerToken("");
              setResetSCIMId("");
            }}
            submitButtonText="Close"
            submitButtonType={ButtonStyleType.NORMAL}
          />
        )}
      </>
    </Fragment>
  );
};
export default SCIMPage;

View File

@@ -398,6 +398,15 @@ const DashboardSideMenu: () => JSX.Element = (): ReactElement => {
},
icon: IconProp.Lock,
},
{
link: {
title: "SCIM",
to: RouteUtil.populateRouteParams(
RouteMap[PageMap.SETTINGS_SCIM] as Route,
),
},
icon: IconProp.Refresh,
},
],
},
{

View File

@@ -0,0 +1,381 @@
import PageComponentProps from "../../PageComponentProps";
import URL from "Common/Types/API/URL";
import { VoidFunction } from "Common/Types/FunctionTypes";
import ObjectID from "Common/Types/ObjectID";
import Banner from "Common/UI/Components/Banner/Banner";
import { ButtonStyleType } from "Common/UI/Components/Button/Button";
import FormFieldSchemaType from "Common/UI/Components/Forms/Types/FormFieldSchemaType";
import ConfirmModal from "Common/UI/Components/Modal/ConfirmModal";
import ModelTable from "Common/UI/Components/ModelTable/ModelTable";
import FieldType from "Common/UI/Components/Types/FieldType";
import HiddenText from "Common/UI/Components/HiddenText/HiddenText";
import ModelAPI from "Common/UI/Utils/ModelAPI/ModelAPI";
import API from "Common/UI/Utils/API/API";
import { IDENTITY_URL } from "Common/UI/Config";
import Navigation from "Common/UI/Utils/Navigation";
import StatusPageSCIM from "Common/Models/DatabaseModels/StatusPageSCIM";
import React, {
Fragment,
FunctionComponent,
ReactElement,
useState,
} from "react";
import IconProp from "Common/Types/Icon/IconProp";
/**
 * Status Page SCIM settings page.
 *
 * Lets project admins manage SCIM (System for Cross-domain Identity
 * Management) configurations for a private status page: create, edit and
 * delete configurations, view a configuration's SCIM endpoint URLs and
 * bearer token, and reset a configuration's bearer token.
 */
const SCIMPage: FunctionComponent<PageComponentProps> = (
  _props: PageComponentProps,
): ReactElement => {
  // Status page id is the second-to-last route param (route ends in ".../scim").
  const modelId: ObjectID = Navigation.getLastParamAsObjectID(1);

  // Id of the SCIM config whose endpoint URLs are being shown ("" = modal hidden).
  const [showSCIMUrlId, setShowSCIMUrlId] = useState<string>("");
  const [currentSCIMConfig, setCurrentSCIMConfig] =
    useState<StatusPageSCIM | null>(null);

  // Toggled after a successful token reset; used as the table key to force a refetch.
  const [refresher, setRefresher] = useState<boolean>(false);

  // State for the "reset bearer token" flow.
  const [resetSCIMId, setResetSCIMId] = useState<string>("");
  const [showResetModal, setShowResetModal] = useState<boolean>(false);
  const [isResetLoading, setIsResetLoading] = useState<boolean>(false);
  const [resetError, setResetError] = useState<string>("");
  const [showResetErrorModal, setShowResetErrorModal] =
    useState<boolean>(false);
  const [showResetSuccessModal, setShowResetSuccessModal] =
    useState<boolean>(false);
  const [newBearerToken, setNewBearerToken] = useState<string>("");

  /**
   * Generates a new bearer token, persists it on the selected SCIM
   * configuration, then shows the success modal (or the error modal on
   * failure).
   */
  const resetBearerToken: () => Promise<void> = async (): Promise<void> => {
    setIsResetLoading(true);
    try {
      const newToken: ObjectID = ObjectID.generate();
      await ModelAPI.updateById<StatusPageSCIM>({
        modelType: StatusPageSCIM,
        id: new ObjectID(resetSCIMId),
        data: {
          bearerToken: newToken.toString(),
        },
      });
      setNewBearerToken(newToken.toString());
      setShowResetModal(false);
      setShowResetSuccessModal(true);
      // Functional updater avoids toggling a stale captured value.
      setRefresher((prev: boolean) => {
        return !prev;
      });
    } catch (err) {
      setResetError(API.getFriendlyMessage(err));
      setShowResetErrorModal(true);
      setShowResetModal(false);
    }
    setIsResetLoading(false);
  };

  return (
    <Fragment>
      <Banner
        openInNewTab={true}
        title="Need help with configuring SCIM?"
        description="Learn more about SCIM (System for Cross-domain Identity Management) setup and configuration for Status Pages"
        link={URL.fromString("https://oneuptime.com/docs/status-page-scim")}
        hideOnMobile={true}
      />
      <ModelTable<StatusPageSCIM>
        key={refresher.toString()}
        modelType={StatusPageSCIM}
        userPreferencesKey={"status-page-scim-table"}
        query={{
          statusPageId: modelId,
        }}
        id="status-page-scim-table"
        name="Status Page > SCIM"
        isDeleteable={true}
        isEditable={true}
        isCreateable={true}
        cardProps={{
          title: "SCIM (System for Cross-domain Identity Management)",
          description:
            "SCIM is an open standard for automating the exchange of user identity information between identity domains, or IT systems. Use SCIM to automatically provision and deprovision users with access to your private Status Page.",
        }}
        formSteps={[
          {
            title: "Basic Info",
            id: "basic",
          },
          {
            title: "Configuration",
            id: "configuration",
          },
        ]}
        noItemsMessage={"No SCIM configuration found."}
        viewPageRoute={Navigation.getCurrentRoute()}
        formFields={[
          {
            field: {
              name: true,
            },
            title: "Name",
            fieldType: FormFieldSchemaType.Text,
            required: true,
            description:
              "Friendly name to help you remember this SCIM configuration.",
            placeholder: "Okta SCIM for Status Page",
            validation: {
              minLength: 2,
            },
            stepId: "basic",
          },
          {
            field: {
              description: true,
            },
            title: "Description",
            fieldType: FormFieldSchemaType.LongText,
            required: false,
            description: "Optional description for this SCIM configuration.",
            placeholder:
              "SCIM configuration for automatic user provisioning to the Status Page from Okta",
            stepId: "basic",
          },
          {
            field: {
              autoProvisionUsers: true,
            },
            title: "Auto Provision Users",
            fieldType: FormFieldSchemaType.Checkbox,
            required: false,
            description:
              "Automatically create users when they are added in your identity provider.",
            stepId: "configuration",
          },
          {
            field: {
              autoDeprovisionUsers: true,
            },
            title: "Auto Deprovision Users",
            fieldType: FormFieldSchemaType.Checkbox,
            required: false,
            description:
              "Automatically remove users when they are removed from your identity provider.",
            stepId: "configuration",
          },
        ]}
        showRefreshButton={true}
        filters={[]}
        columns={[
          {
            field: {
              name: true,
            },
            title: "Name",
            type: FieldType.Text,
          },
          {
            field: {
              description: true,
            },
            title: "Description",
            type: FieldType.Text,
          },
          {
            field: {
              autoProvisionUsers: true,
            },
            title: "Auto Provision Users",
            type: FieldType.Boolean,
          },
          {
            field: {
              autoDeprovisionUsers: true,
            },
            title: "Auto Deprovision Users",
            type: FieldType.Boolean,
          },
          {
            field: {
              bearerToken: true,
            },
            title: "Bearer Token",
            type: FieldType.HiddenText,
            getElement: (item: StatusPageSCIM): ReactElement => {
              return <HiddenText text={item["bearerToken"] as string} />;
            },
          },
          {
            field: {
              createdAt: true,
            },
            title: "Created",
            type: FieldType.DateTime,
          },
        ]}
        actionButtons={[
          {
            title: "Show SCIM Endpoint URLs",
            buttonStyleType: ButtonStyleType.NORMAL,
            icon: IconProp.List,
            onClick: async (
              item: StatusPageSCIM,
              onCompleteAction: VoidFunction,
              onError: (err: Error) => void,
            ) => {
              try {
                setCurrentSCIMConfig(item);
                setShowSCIMUrlId(item["_id"] as string);
                onCompleteAction();
              } catch (err) {
                onError(err as Error);
              }
            },
          },
          {
            title: "Reset Bearer Token",
            buttonStyleType: ButtonStyleType.DANGER_OUTLINE,
            icon: IconProp.Refresh,
            onClick: async (
              item: StatusPageSCIM,
              onCompleteAction: VoidFunction,
              onError: (err: Error) => void,
            ) => {
              try {
                setResetSCIMId(item["_id"] as string);
                setShowResetModal(true);
                onCompleteAction();
              } catch (err) {
                onError(err as Error);
              }
            },
          },
        ]}
      />
      {/* Endpoint URLs modal — shown after "Show SCIM Endpoint URLs". */}
      {showSCIMUrlId && currentSCIMConfig ? (
        <ConfirmModal
          title={`SCIM URLs - ${currentSCIMConfig.name}`}
          description={
            <div>
              <p>
                Configure your identity provider with these SCIM endpoint
                URLs:
              </p>
              <br />
              <div>
                <strong>SCIM Base URL:</strong>
                <br />
                <code
                  style={{
                    backgroundColor: "#f4f4f4",
                    padding: "5px",
                    borderRadius: "3px",
                    fontSize: "12px",
                  }}
                >
                  {IDENTITY_URL.toString()}/status-page-scim/v2/{showSCIMUrlId}
                </code>
              </div>
              <br />
              <div>
                <strong>Users Endpoint:</strong>
                <br />
                <code
                  style={{
                    backgroundColor: "#f4f4f4",
                    padding: "5px",
                    borderRadius: "3px",
                    fontSize: "12px",
                  }}
                >
                  {IDENTITY_URL.toString()}/status-page-scim/v2/{showSCIMUrlId}/Users
                </code>
              </div>
              <br />
              <div>
                <strong>Bearer Token:</strong>
                <br />
                <HiddenText text={currentSCIMConfig.bearerToken as string} />
              </div>
              <br />
              <p>
                <strong>Note:</strong> Make sure to use this bearer token in
                the Authorization header when making SCIM API requests.
              </p>
            </div>
          }
          submitButtonText={"Close"}
          onSubmit={() => {
            setShowSCIMUrlId("");
            setCurrentSCIMConfig(null);
          }}
          submitButtonType={ButtonStyleType.NORMAL}
        />
      ) : (
        <></>
      )}
      {/* Confirmation before resetting the bearer token. */}
      {showResetModal ? (
        <ConfirmModal
          title={"Reset Bearer Token"}
          description={
            "Are you sure you want to reset the bearer token? This will invalidate the current token and you will need to update your identity provider with the new token."
          }
          submitButtonText={"Reset"}
          onSubmit={resetBearerToken}
          isLoading={isResetLoading}
          submitButtonType={ButtonStyleType.DANGER}
          onClose={() => {
            setShowResetModal(false);
            // Clear the pending id so a cancelled reset leaves no stale state.
            setResetSCIMId("");
          }}
        />
      ) : (
        <></>
      )}
      {showResetErrorModal ? (
        <ConfirmModal
          title={"Error"}
          description={resetError}
          submitButtonText={"Close"}
          onSubmit={() => {
            setShowResetErrorModal(false);
            setResetError("");
            setResetSCIMId("");
          }}
          submitButtonType={ButtonStyleType.NORMAL}
        />
      ) : (
        <></>
      )}
      {/* Shows the newly generated token exactly once after a reset. */}
      {showResetSuccessModal ? (
        <ConfirmModal
          title={"Bearer Token Reset"}
          description={
            <div>
              <p>Bearer token has been reset successfully.</p>
              <br />
              <div>
                <strong>New Bearer Token:</strong>
                <br />
                <HiddenText text={newBearerToken} />
              </div>
              <br />
              <p>
                <strong>Important:</strong> Make sure to update your identity
                provider with this new bearer token.
              </p>
            </div>
          }
          submitButtonText={"Close"}
          onSubmit={() => {
            setShowResetSuccessModal(false);
            setNewBearerToken("");
            setResetSCIMId("");
          }}
          submitButtonType={ButtonStyleType.NORMAL}
        />
      ) : (
        <></>
      )}
    </Fragment>
  );
};
export default SCIMPage;

View File

@@ -230,6 +230,17 @@ const DashboardSideMenu: FunctionComponent<ComponentProps> = (
icon={IconProp.Lock}
/>
<SideMenuItem
link={{
title: "SCIM",
to: RouteUtil.populateRouteParams(
RouteMap[PageMap.STATUS_PAGE_VIEW_SCIM] as Route,
{ modelId: props.modelId },
),
}}
icon={IconProp.Team}
/>
<SideMenuItem
link={{
title: "Authentication Settings",

View File

@@ -196,6 +196,12 @@ const SettingsSSO: LazyExoticComponent<FunctionComponent<ComponentProps>> =
lazy(() => {
return import("../Pages/Settings/SSO");
});
const SettingsSCIM: LazyExoticComponent<FunctionComponent<ComponentProps>> =
lazy(() => {
return import("../Pages/Settings/SCIM");
});
const SettingsSmsLog: LazyExoticComponent<FunctionComponent<ComponentProps>> =
lazy(() => {
return import("../Pages/Settings/SmsLog");
@@ -680,6 +686,18 @@ const SettingsRoutes: FunctionComponent<ComponentProps> = (
}
/>
<PageRoute
path={RouteUtil.getLastPathForKey(PageMap.SETTINGS_SCIM)}
element={
<Suspense fallback={Loader}>
<SettingsSCIM
{...props}
pageRoute={RouteMap[PageMap.SETTINGS_SCIM] as Route}
/>
</Suspense>
}
/>
<PageRoute
path={RouteUtil.getLastPathForKey(
PageMap.SETTINGS_INCIDENTS_SEVERITY,

View File

@@ -119,6 +119,11 @@ const StatusPageViewSSO: LazyExoticComponent<
> = lazy(() => {
return import("../Pages/StatusPages/View/SSO");
});
const StatusPageViewSCIM: LazyExoticComponent<
FunctionComponent<ComponentProps>
> = lazy(() => {
return import("../Pages/StatusPages/View/SCIM");
});
const StatusPageViewPrivateUser: LazyExoticComponent<
FunctionComponent<ComponentProps>
> = lazy(() => {
@@ -360,6 +365,18 @@ const StatusPagesRoutes: FunctionComponent<ComponentProps> = (
}
/>
<PageRoute
path={RouteUtil.getLastPathForKey(PageMap.STATUS_PAGE_VIEW_SCIM)}
element={
<Suspense fallback={Loader}>
<StatusPageViewSCIM
{...props}
pageRoute={RouteMap[PageMap.STATUS_PAGE_VIEW_SCIM] as Route}
/>
</Suspense>
}
/>
<PageRoute
path={RouteUtil.getLastPathForKey(
PageMap.STATUS_PAGE_VIEW_EMAIL_SUBSCRIBERS,

View File

@@ -211,6 +211,7 @@ enum PageMap {
STATUS_PAGE_VIEW_CUSTOM_FIELDS = "STATUS_PAGE_VIEW_CUSTOM_FIELDS",
STATUS_PAGE_VIEW_REPORTS = "STATUS_PAGE_VIEW_REPORTS",
STATUS_PAGE_VIEW_SSO = "STATUS_PAGE_VIEW_SSO",
STATUS_PAGE_VIEW_SCIM = "STATUS_PAGE_VIEW_SCIM",
STATUS_PAGE_VIEW_OWNERS = "STATUS_PAGE_VIEW_OWNERS",
STATUS_PAGE_VIEW_SETTINGS = "STATUS_PAGE_VIEW_SETTINGS",
@@ -338,6 +339,9 @@ enum PageMap {
// SSO.
SETTINGS_SSO = "SETTINGS_SSO",
// SCIM.
SETTINGS_SCIM = "SETTINGS_SCIM",
// Domains
SETTINGS_DOMAINS = "SETTINGS_DOMAINS",

View File

@@ -136,6 +136,7 @@ export const StatusPagesRoutePath: Dictionary<string> = {
[PageMap.STATUS_PAGE_VIEW_EMBEDDED]: `${RouteParams.ModelID}/embedded`,
[PageMap.STATUS_PAGE_VIEW_SUBSCRIBER_SETTINGS]: `${RouteParams.ModelID}/subscriber-settings`,
[PageMap.STATUS_PAGE_VIEW_SSO]: `${RouteParams.ModelID}/sso`,
[PageMap.STATUS_PAGE_VIEW_SCIM]: `${RouteParams.ModelID}/scim`,
[PageMap.STATUS_PAGE_VIEW_CUSTOM_HTML_CSS]: `${RouteParams.ModelID}/custom-code`,
[PageMap.STATUS_PAGE_VIEW_RESOURCES]: `${RouteParams.ModelID}/resources`,
[PageMap.STATUS_PAGE_VIEW_ADVANCED_OPTIONS]: `${RouteParams.ModelID}/advanced-options`,
@@ -244,6 +245,7 @@ export const SettingsRoutePath: Dictionary<string> = {
[PageMap.SETTINGS_DOMAINS]: "domains",
[PageMap.SETTINGS_FEATURE_FLAGS]: "feature-flags",
[PageMap.SETTINGS_SSO]: "sso",
[PageMap.SETTINGS_SCIM]: "scim",
[PageMap.SETTINGS_TEAMS]: "teams",
[PageMap.SETTINGS_USERS]: "users",
[PageMap.SETTINGS_USER_VIEW]: `users/${RouteParams.ModelID}`,
@@ -1096,6 +1098,12 @@ const RouteMap: Dictionary<Route> = {
}`,
),
[PageMap.STATUS_PAGE_VIEW_SCIM]: new Route(
`/dashboard/${RouteParams.ProjectID}/status-pages/${
StatusPagesRoutePath[PageMap.STATUS_PAGE_VIEW_SCIM]
}`,
),
[PageMap.STATUS_PAGE_VIEW_CUSTOM_HTML_CSS]: new Route(
`/dashboard/${RouteParams.ProjectID}/status-pages/${
StatusPagesRoutePath[PageMap.STATUS_PAGE_VIEW_CUSTOM_HTML_CSS]
@@ -1702,6 +1710,12 @@ const RouteMap: Dictionary<Route> = {
}`,
),
[PageMap.SETTINGS_SCIM]: new Route(
`/dashboard/${RouteParams.ProjectID}/settings/${
SettingsRoutePath[PageMap.SETTINGS_SCIM]
}`,
),
[PageMap.SETTINGS_TEAMS]: new Route(
`/dashboard/${RouteParams.ProjectID}/settings/${
SettingsRoutePath[PageMap.SETTINGS_TEAMS]

View File

@@ -0,0 +1,136 @@
# SCIM (System for Cross-domain Identity Management)
OneUptime supports SCIM v2.0 protocol for automated user provisioning and deprovisioning. SCIM enables identity providers (IdPs) like Azure AD, Okta, and other enterprise identity systems to automatically manage user access to OneUptime projects and status pages.
## Overview
SCIM integration provides the following benefits:
- **Automated User Provisioning**: Automatically create users in OneUptime when they're assigned in your IdP
- **Automated User Deprovisioning**: Automatically remove users from OneUptime when they're unassigned in your IdP
- **User Attribute Synchronization**: Keep user information synchronized between your IdP and OneUptime
- **Centralized Access Management**: Manage OneUptime access from your existing identity management system
## SCIM for Projects
Project SCIM allows identity providers to manage team members within OneUptime projects.
### Setting Up Project SCIM
1. **Navigate to Project Settings**
- Go to your OneUptime project
- Navigate to **Project Settings** > **Team** > **SCIM**
2. **Configure SCIM Settings**
- Enable **Auto Provision Users** to automatically add users when they're assigned in your IdP
- Enable **Auto Deprovision Users** to automatically remove users when they're unassigned in your IdP
- Select the **Default Teams** that new users should be added to
- Copy the **SCIM Base URL** and **Bearer Token** for your IdP configuration
3. **Configure Your Identity Provider**
- Use the SCIM Base URL: `https://oneuptime.com/scim/v2/{scimId}`
- Configure bearer token authentication with the provided token
- Map user attributes (email is required)
### Project SCIM Endpoints
- **Service Provider Config**: `GET /scim/v2/{scimId}/ServiceProviderConfig`
- **List Users**: `GET /scim/v2/{scimId}/Users`
- **Get User**: `GET /scim/v2/{scimId}/Users/{userId}`
- **Create User**: `POST /scim/v2/{scimId}/Users`
- **Update User**: `PUT /scim/v2/{scimId}/Users/{userId}`
- **Delete User**: `DELETE /scim/v2/{scimId}/Users/{userId}`
### Project SCIM User Lifecycle
1. **User Assignment in IdP**: When a user is assigned to OneUptime in your IdP
2. **SCIM Provisioning**: IdP calls OneUptime SCIM API to create the user
3. **Team Membership**: User is automatically added to configured default teams
4. **Access Granted**: User can now access the OneUptime project
5. **User Unassignment**: When user is unassigned in IdP
6. **SCIM Deprovisioning**: IdP calls OneUptime SCIM API to remove the user
7. **Access Revoked**: User loses access to the project
## SCIM for Status Pages
Status Page SCIM allows identity providers to manage subscribers to private status pages.
### Setting Up Status Page SCIM
1. **Navigate to Status Page Settings**
- Go to your OneUptime status page
- Navigate to **Status Page Settings** > **Private Users** > **SCIM**
2. **Configure SCIM Settings**
- Enable **Auto Provision Users** to automatically add subscribers when they're assigned in your IdP
- Enable **Auto Deprovision Users** to automatically remove subscribers when they're unassigned in your IdP
- Copy the **SCIM Base URL** and **Bearer Token** for your IdP configuration
3. **Configure Your Identity Provider**
- Use the SCIM Base URL: `https://oneuptime.com/status-page-scim/v2/{scimId}`
- Configure bearer token authentication with the provided token
- Map user attributes (email is required)
### Status Page SCIM Endpoints
- **Service Provider Config**: `GET /status-page-scim/v2/{scimId}/ServiceProviderConfig`
- **List Users**: `GET /status-page-scim/v2/{scimId}/Users`
- **Get User**: `GET /status-page-scim/v2/{scimId}/Users/{userId}`
- **Create User**: `POST /status-page-scim/v2/{scimId}/Users`
- **Update User**: `PUT /status-page-scim/v2/{scimId}/Users/{userId}`
- **Delete User**: `DELETE /status-page-scim/v2/{scimId}/Users/{userId}`
### Status Page SCIM User Lifecycle
1. **User Assignment in IdP**: When a user is assigned to OneUptime Status Page in your IdP
2. **SCIM Provisioning**: IdP calls OneUptime SCIM API to create the subscriber
3. **Access Granted**: User can now access the private status page
4. **User Unassignment**: When user is unassigned in IdP
5. **SCIM Deprovisioning**: IdP calls OneUptime SCIM API to remove the subscriber
6. **Access Revoked**: User loses access to the status page
## Identity Provider Configuration
### Azure Active Directory (Azure AD)
1. **Add OneUptime from Azure AD Gallery**
- In Azure AD, go to **Enterprise Applications** > **New Application**
- Search for "OneUptime" or add a **Non-gallery application**
2. **Configure SCIM Settings**
- In the OneUptime application, go to **Provisioning**
- Set **Provisioning Mode** to **Automatic**
- Enter the **Tenant URL** (SCIM Base URL from OneUptime)
- Enter the **Secret Token** (Bearer Token from OneUptime)
- Test the connection and save
3. **Configure Attribute Mappings**
- Map Azure AD attributes to OneUptime SCIM attributes
- Ensure `userPrincipalName` or `mail` is mapped to `userName`
- Configure any additional attribute mappings as needed
4. **Assign Users**
- Go to **Users and groups** and assign users to the OneUptime application
- Users will be automatically provisioned to OneUptime
### Okta
1. **Add OneUptime Application**
- In Okta Admin Console, go to **Applications** > **Add Application**
- Create a **Web** application or use **SCIM 2.0 Test App (Header Auth)**
2. **Configure SCIM Settings**
- In the application settings, go to **Provisioning**
- Set **SCIM connector base URL** to the OneUptime SCIM Base URL
- Set **Unique identifier field for users** to `userName`
- Enter the **Bearer Token** in the authentication header
3. **Configure Attribute Mappings**
- Map Okta user attributes to SCIM attributes
- Ensure `email` is mapped to `userName`
- Configure additional mappings as needed
4. **Assign Users**
- Assign users to the OneUptime application
- Users will be automatically provisioned to OneUptime

View File

@@ -78,9 +78,18 @@ const DocsNav: NavGroup[] = [
{
title: "IP Addresses",
url: "/docs/configuration/ip-addresses",
},
}
],
},
{
title: "Identity",
links: [
{
title: "SCIM API",
url: "/docs/identity/scim",
},
]
}
{
title: "Terraform Provider",
links: [

View File

@@ -1,23 +1,17 @@
import TelemetryIngest, {
TelemetryRequest,
} from "Common/Server/Middleware/TelemetryIngest";
import OneUptimeDate from "Common/Types/Date";
import { JSONObject } from "Common/Types/JSON";
import ProductType from "Common/Types/MeteredPlan/ProductType";
import LogService from "Common/Server/Services/LogService";
import Express, {
ExpressRequest,
ExpressResponse,
ExpressRouter,
NextFunction,
} from "Common/Server/Utils/Express";
import logger from "Common/Server/Utils/Logger";
import Response from "Common/Server/Utils/Response";
import Log from "Common/Models/AnalyticsModels/Log";
import LogSeverity from "Common/Types/Log/LogSeverity";
import OTelIngestService from "Common/Server/Services/OpenTelemetryIngestService";
import ObjectID from "Common/Types/ObjectID";
import JSONFunctions from "Common/Types/JSONFunctions";
import FluentIngestQueueService from "../Services/Queue/FluentIngestQueueService";
import ClusterKeyAuthorization from "Common/Server/Middleware/ClusterKeyAuthorization";
import BadRequestException from "Common/Types/Exception/BadRequestException";
export class FluentRequestMiddleware {
public static async getProductType(
@@ -46,96 +40,108 @@ router.post(
next: NextFunction,
): Promise<void> => {
try {
logger.debug("Fluent ProbeIngest API called");
const dbLogs: Array<Log> = [];
let logItems: Array<JSONObject | string> | JSONObject = req.body as
| Array<JSONObject | string>
| JSONObject;
let oneuptimeServiceName: string | string[] | undefined =
req.headers["x-oneuptime-service-name"];
if (!oneuptimeServiceName) {
oneuptimeServiceName = "Unknown Service";
if (!(req as TelemetryRequest).projectId) {
throw new BadRequestException(
"Invalid request - projectId not found in request.",
);
}
const telemetryService: {
serviceId: ObjectID;
dataRententionInDays: number;
} = await OTelIngestService.telemetryServiceFromName({
serviceName: oneuptimeServiceName as string,
projectId: (req as TelemetryRequest).projectId,
req.body = req.body.toJSON ? req.body.toJSON() : req.body;
// Return response immediately
Response.sendEmptySuccessResponse(req, res);
// Add to queue for asynchronous processing
await FluentIngestQueueService.addFluentIngestJob(
req as TelemetryRequest,
);
return;
} catch (err) {
return next(err);
}
},
);
// Queue stats endpoint
router.get(
"/fluent/queue/stats",
ClusterKeyAuthorization.isAuthorizedServiceMiddleware,
async (
req: ExpressRequest,
res: ExpressResponse,
next: NextFunction,
): Promise<void> => {
try {
const stats: {
waiting: number;
active: number;
completed: number;
failed: number;
delayed: number;
total: number;
} = await FluentIngestQueueService.getQueueStats();
return Response.sendJsonObjectResponse(req, res, stats);
} catch (err) {
return next(err);
}
},
);
// Queue size endpoint
router.get(
"/fluent/queue/size",
ClusterKeyAuthorization.isAuthorizedServiceMiddleware,
async (
req: ExpressRequest,
res: ExpressResponse,
next: NextFunction,
): Promise<void> => {
try {
const size: number = await FluentIngestQueueService.getQueueSize();
return Response.sendJsonObjectResponse(req, res, { size });
} catch (err) {
return next(err);
}
},
);
// Queue failed jobs endpoint
router.get(
"/fluent/queue/failed",
ClusterKeyAuthorization.isAuthorizedServiceMiddleware,
async (
req: ExpressRequest,
res: ExpressResponse,
next: NextFunction,
): Promise<void> => {
try {
// Parse pagination parameters from query string
const start: number = parseInt(req.query["start"] as string) || 0;
const end: number = parseInt(req.query["end"] as string) || 100;
const failedJobs: Array<{
id: string;
name: string;
data: any;
failedReason: string;
stackTrace?: string;
processedOn: Date | null;
finishedOn: Date | null;
attemptsMade: number;
}> = await FluentIngestQueueService.getFailedJobs({
start,
end,
});
if (
logItems &&
typeof logItems === "object" &&
(logItems as JSONObject)["json"]
) {
logItems = (logItems as JSONObject)["json"] as
| Array<JSONObject | string>
| JSONObject;
}
if (!Array.isArray(logItems)) {
logItems = [logItems];
}
for (let logItem of logItems) {
const dbLog: Log = new Log();
dbLog.projectId = (req as TelemetryRequest).projectId;
dbLog.serviceId = telemetryService.serviceId;
dbLog.severityNumber = 0;
const currentTimeAndDate: Date = OneUptimeDate.getCurrentDate();
dbLog.timeUnixNano = OneUptimeDate.toUnixNano(currentTimeAndDate);
dbLog.time = currentTimeAndDate;
dbLog.severityText = LogSeverity.Unspecified;
if (typeof logItem === "string") {
// check if its parseable to json
try {
logItem = JSON.parse(logItem);
} catch {
// do nothing
}
}
if (typeof logItem !== "string") {
logItem = JSON.stringify(logItem);
}
dbLog.body = logItem as string;
dbLogs.push(dbLog);
}
await LogService.createMany({
items: dbLogs,
props: {
isRoot: true,
return Response.sendJsonObjectResponse(req, res, {
failedJobs,
pagination: {
start,
end,
count: failedJobs.length,
},
});
OTelIngestService.recordDataIngestedUsgaeBilling({
services: {
[oneuptimeServiceName as string]: {
dataIngestedInGB: JSONFunctions.getSizeOfJSONinGB(req.body),
dataRententionInDays: telemetryService.dataRententionInDays,
serviceId: telemetryService.serviceId,
serviceName: oneuptimeServiceName as string,
},
},
projectId: (req as TelemetryRequest).projectId,
productType: ProductType.Logs,
}).catch((err: Error) => {
logger.error(err);
});
return Response.sendEmptySuccessResponse(req, res);
} catch (err) {
return next(err);
}

View File

@@ -0,0 +1,37 @@
import Express, {
ExpressRequest,
ExpressResponse,
ExpressRouter,
NextFunction,
} from "Common/Server/Utils/Express";
import FluentIngestQueueService from "../Services/Queue/FluentIngestQueueService";
// import ClusterKeyAuthorization from "Common/Server/Middleware/ClusterKeyAuthorization";
const router: ExpressRouter = Express.getRouter();

/**
 * JSON metrics endpoint for KEDA autoscaling.
 *
 * Responds with the current fluent-ingest queue size so the KEDA
 * metrics-api scaler can poll it.
 */
router.get(
  "/metrics/queue-size",
  // ClusterKeyAuthorization.isAuthorizedServiceMiddleware, // Temporarily disabled for KEDA debugging
  async (
    _req: ExpressRequest,
    res: ExpressResponse,
    next: NextFunction,
  ): Promise<void> => {
    try {
      const size: number = await FluentIngestQueueService.getQueueSize();
      res.setHeader("Content-Type", "application/json");
      res.status(200).json({ queueSize: size });
    } catch (err) {
      return next(err);
    }
  },
);

export default router;

View File

@@ -1,4 +1,5 @@
import FluentIngestAPI from "./API/FluentIngest";
import MetricsAPI from "./API/Metrics";
import { PromiseVoidFunction } from "Common/Types/FunctionTypes";
import { ClickhouseAppInstance } from "Common/Server/Infrastructure/ClickhouseDatabase";
import PostgresAppInstance from "Common/Server/Infrastructure/PostgresDatabase";
@@ -9,12 +10,14 @@ import logger from "Common/Server/Utils/Logger";
import Realtime from "Common/Server/Utils/Realtime";
import App from "Common/Server/Utils/StartServer";
import Telemetry from "Common/Server/Utils/Telemetry";
import "./Jobs/FluentIngest/ProcessFluentIngest";
const app: ExpressApplication = Express.getExpressApp();
const APP_NAME: string = "fluent-ingest";
app.use([`/${APP_NAME}`, "/"], FluentIngestAPI);
app.use([`/${APP_NAME}`, "/"], MetricsAPI);
const init: PromiseVoidFunction = async (): Promise<void> => {
try {

View File

@@ -0,0 +1,141 @@
import { FluentIngestJobData } from "../../Services/Queue/FluentIngestQueueService";
import logger from "Common/Server/Utils/Logger";
import { QueueJob, QueueName } from "Common/Server/Infrastructure/Queue";
import QueueWorker from "Common/Server/Infrastructure/QueueWorker";
import ObjectID from "Common/Types/ObjectID";
import OneUptimeDate from "Common/Types/Date";
import { JSONObject } from "Common/Types/JSON";
import ProductType from "Common/Types/MeteredPlan/ProductType";
import LogService from "Common/Server/Services/LogService";
import LogSeverity from "Common/Types/Log/LogSeverity";
import OTelIngestService from "Common/Server/Services/OpenTelemetryIngestService";
import JSONFunctions from "Common/Types/JSONFunctions";
import Log from "Common/Models/AnalyticsModels/Log";
// Data extracted from a queued fluent-ingest job for processing.
// `requestBody` / `requestHeaders` mirror the HTTP request that was enqueued.
interface FluentIngestProcessData {
  projectId: ObjectID;
  requestBody: JSONObject;
  requestHeaders: JSONObject;
}
// Set up the worker for processing fluent ingest queue
QueueWorker.getWorker(
QueueName.FluentIngest,
async (job: QueueJob): Promise<void> => {
logger.debug(`Processing fluent ingestion job: ${job.name}`);
try {
const jobData: FluentIngestJobData = job.data as FluentIngestJobData;
// Pass job data directly to processing function
await processFluentIngestFromQueue({
projectId: new ObjectID(jobData.projectId),
requestBody: jobData.requestBody,
requestHeaders: jobData.requestHeaders,
});
logger.debug(`Successfully processed fluent ingestion job: ${job.name}`);
} catch (error) {
logger.error(`Error processing fluent ingestion job:`);
logger.error(error);
throw error;
}
},
{ concurrency: 20 }, // Process up to 20 fluent ingest jobs concurrently
);
/**
 * Processes one queued fluent-ingest payload: resolves the telemetry
 * service for the sending project, converts each log record into a Log
 * row, bulk-inserts the rows, and records data-ingestion billing.
 */
async function processFluentIngestFromQueue(
  data: FluentIngestProcessData,
): Promise<void> {
  const dbLogs: Array<Log> = [];

  let logItems: Array<JSONObject | string> | JSONObject = data.requestBody as
    | Array<JSONObject | string>
    | JSONObject;

  // Service name comes from the forwarded request header; fall back to a
  // placeholder when the caller did not send one.
  let oneuptimeServiceName: string | string[] | undefined = data.requestHeaders[
    "x-oneuptime-service-name"
  ] as string | string[] | undefined;

  if (!oneuptimeServiceName) {
    oneuptimeServiceName = "Unknown Service";
  }

  // Resolve the telemetry service id and retention for this project + name.
  const telemetryService: {
    serviceId: ObjectID;
    dataRententionInDays: number;
  } = await OTelIngestService.telemetryServiceFromName({
    serviceName: oneuptimeServiceName as string,
    projectId: data.projectId,
  });

  // Unwrap a "json" envelope if the payload carries one.
  if (
    logItems &&
    typeof logItems === "object" &&
    (logItems as JSONObject)["json"]
  ) {
    logItems = (logItems as JSONObject)["json"] as
      | Array<JSONObject | string>
      | JSONObject;
  }

  // Normalize a single record to a one-element array.
  if (!Array.isArray(logItems)) {
    logItems = [logItems];
  }

  for (let logItem of logItems) {
    const dbLog: Log = new Log();
    dbLog.projectId = data.projectId;
    dbLog.serviceId = telemetryService.serviceId;
    dbLog.severityNumber = 0;

    // Ingestion time is used as the log timestamp.
    const currentTimeAndDate: Date = OneUptimeDate.getCurrentDate();
    dbLog.timeUnixNano = OneUptimeDate.toUnixNano(currentTimeAndDate);
    dbLog.time = currentTimeAndDate;
    dbLog.severityText = LogSeverity.Unspecified;

    if (typeof logItem === "string") {
      // check if its parseable to json
      try {
        logItem = JSON.parse(logItem);
      } catch {
        // do nothing
      }
    }

    // Non-string records (including strings that parsed to objects above)
    // are stored as their JSON serialization.
    if (typeof logItem !== "string") {
      logItem = JSON.stringify(logItem);
    }

    dbLog.body = logItem as string;
    dbLogs.push(dbLog);
  }

  await LogService.createMany({
    items: dbLogs,
    props: {
      isRoot: true,
    },
  });

  // Billing is fire-and-forget: a failure here must not fail the job.
  // NOTE(review): "Usgae"/"Rentention" spellings match the shared service API.
  OTelIngestService.recordDataIngestedUsgaeBilling({
    services: {
      [oneuptimeServiceName as string]: {
        dataIngestedInGB: JSONFunctions.getSizeOfJSONinGB(
          data.requestBody as JSONObject,
        ),
        dataRententionInDays: telemetryService.dataRententionInDays,
        serviceId: telemetryService.serviceId,
        serviceName: oneuptimeServiceName as string,
      },
    },
    projectId: data.projectId,
    productType: ProductType.Logs,
  }).catch((err: Error) => {
    logger.error(err);
  });
}
// Module side effect: importing this file registers the worker above.
logger.debug("Fluent ingest worker initialized");

View File

@@ -0,0 +1,73 @@
import { TelemetryRequest } from "Common/Server/Middleware/TelemetryIngest";
import Queue, { QueueName } from "Common/Server/Infrastructure/Queue";
import { JSONObject } from "Common/Types/JSON";
import OneUptimeDate from "Common/Types/Date";
import logger from "Common/Server/Utils/Logger";
// Serialized payload stored on a fluent-ingest queue job.
// `projectId` is stringified so the payload round-trips as plain JSON.
export interface FluentIngestJobData {
  projectId: string;
  requestBody: JSONObject;
  requestHeaders: Record<string, string>;
  ingestionTimestamp: Date;
}
/**
 * Queue facade for fluent-ingest jobs: enqueues incoming fluent payloads
 * for asynchronous processing and exposes queue introspection helpers.
 */
export default class FluentIngestQueueService {
  /**
   * Enqueues the body/headers of an authenticated fluent request for
   * background processing.
   *
   * @throws rethrows any queueing error after logging it.
   */
  public static async addFluentIngestJob(req: TelemetryRequest): Promise<void> {
    try {
      const jobData: FluentIngestJobData = {
        projectId: req.projectId.toString(),
        requestBody: req.body,
        requestHeaders: req.headers as Record<string, string>,
        ingestionTimestamp: OneUptimeDate.getCurrentDate(),
      };

      // Unix-nano suffix keeps job ids unique per project. `req.projectId`
      // is used unconditionally above, so no optional chaining here (it
      // previously could have produced a "fluent-undefined-…" id).
      const jobId: string = `fluent-${req.projectId.toString()}-${OneUptimeDate.getCurrentDateAsUnixNano()}`;

      await Queue.addJob(
        QueueName.FluentIngest,
        jobId,
        "ProcessFluentIngest",
        jobData as unknown as JSONObject,
      );

      logger.debug(`Added fluent ingestion job: ${jobId}`);
    } catch (error) {
      logger.error(`Error adding fluent ingestion job:`);
      logger.error(error);
      throw error;
    }
  }

  /** Number of jobs currently in the fluent-ingest queue. */
  public static async getQueueSize(): Promise<number> {
    return Queue.getQueueSize(QueueName.FluentIngest);
  }

  /** Per-state job counts for the fluent-ingest queue. */
  public static async getQueueStats(): Promise<{
    waiting: number;
    active: number;
    completed: number;
    failed: number;
    delayed: number;
    total: number;
  }> {
    return Queue.getQueueStats(QueueName.FluentIngest);
  }

  /** Failed jobs in the given start/end range, with failure details. */
  public static getFailedJobs(options?: {
    start?: number;
    end?: number;
  }): Promise<
    Array<{
      id: string;
      name: string;
      data: JSONObject;
      failedReason: string;
      stackTrace?: string;
      processedOn: Date | null;
      finishedOn: Date | null;
      attemptsMade: number;
    }>
  > {
    return Queue.getFailedJobs(QueueName.FluentIngest, options);
  }
}

View File

@@ -98,39 +98,39 @@ Usage:
value: {{ $.Release.Name }}-docs.{{ $.Release.Namespace }}.svc.{{ $.Values.global.clusterDomain }}
- name: APP_PORT
value: {{ $.Values.port.app | squote }}
value: {{ $.Values.app.ports.http | squote }}
- name: PROBE_INGEST_PORT
value: {{ $.Values.port.probeIngest | squote }}
value: {{ $.Values.probeIngest.ports.http | squote }}
- name: SERVER_MONITOR_INGEST_PORT
value: {{ $.Values.port.serverMonitorIngest | squote }}
value: {{ $.Values.serverMonitorIngest.ports.http | squote }}
- name: OPEN_TELEMETRY_INGEST_PORT
value: {{ $.Values.port.openTelemetryIngest | squote }}
value: {{ $.Values.openTelemetryIngest.ports.http | squote }}
- name: INCOMING_REQUEST_INGEST_PORT
value: {{ $.Values.port.incomingRequestIngest | squote }}
value: {{ $.Values.incomingRequestIngest.ports.http | squote }}
- name: FLUENT_INGEST_PORT
value: {{ $.Values.port.fluentIngest | squote }}
value: {{ $.Values.fluentIngest.ports.http | squote }}
- name: TEST_SERVER_PORT
value: {{ $.Values.port.testServer | squote }}
value: {{ $.Values.testServer.ports.http | squote }}
- name: ACCOUNTS_PORT
value: {{ $.Values.port.accounts | squote }}
value: {{ $.Values.accounts.ports.http | squote }}
- name: ISOLATED_VM_PORT
value: {{ $.Values.port.isolatedVM | squote }}
value: {{ $.Values.isolatedVM.ports.http | squote }}
- name: HOME_PORT
value: {{ $.Values.port.home | squote }}
value: {{ $.Values.home.ports.http | squote }}
- name: WORKER_PORT
value: {{ $.Values.port.worker | squote }}
value: {{ $.Values.worker.ports.http | squote }}
- name: WORKFLOW_PORT
value: {{ $.Values.port.workflow | squote }}
value: {{ $.Values.workflow.ports.http | squote }}
- name: STATUS_PAGE_PORT
value: {{ $.Values.port.statusPage | squote }}
value: {{ $.Values.statusPage.ports.http | squote }}
- name: DASHBOARD_PORT
value: {{ $.Values.port.dashboard | squote }}
value: {{ $.Values.dashboard.ports.http | squote }}
- name: ADMIN_DASHBOARD_PORT
value: {{ $.Values.port.adminDashboard | squote }}
value: {{ $.Values.adminDashboard.ports.http | squote }}
- name: API_REFERENCE_PORT
value: {{ $.Values.port.apiReference | squote }}
value: {{ $.Values.apiReference.ports.http | squote }}
- name: DOCS_PORT
value: {{ $.Values.port.docs | squote }}
value: {{ $.Values.docs.ports.http | squote }}
{{- end }}
@@ -559,9 +559,13 @@ spec:
selector:
matchLabels:
app: {{ printf "%s-%s" $.Release.Name $.ServiceName }}
{{- if $.ReplicaCount }}
replicas: {{ $.ReplicaCount }}
{{- else }}
{{- if or (not $.Values.autoscaling.enabled) ($.DisableAutoscaler) }}
replicas: {{ $.Values.deployment.replicaCount }}
{{- end }}
{{- end }}
template:
metadata:
labels:
@@ -695,3 +699,86 @@ spec:
requests:
storage: {{ $.Storage }}
{{- end }}
{{/*
KEDA ScaledObject template for metric-based autoscaling
Usage: include "oneuptime.kedaScaledObject" (dict "ServiceName" "service-name" "Release" .Release "Values" .Values "MetricsConfig" {...})
*/}}
{{- define "oneuptime.kedaScaledObject" }}
{{- /* Emits nothing unless global KEDA support, this service's metrics config,
       and its autoscaler are all enabled. */}}
{{- if and .Values.keda.enabled .MetricsConfig.enabled (not .DisableAutoscaler) }}
apiVersion: keda.sh/v1alpha1
kind: ScaledObject
metadata:
  name: {{ printf "%s-%s-scaledobject" .Release.Name .ServiceName }}
  namespace: {{ .Release.Namespace }}
  labels:
    app: {{ printf "%s-%s" .Release.Name .ServiceName }}
    app.kubernetes.io/part-of: oneuptime
    app.kubernetes.io/managed-by: Helm
    appname: oneuptime
spec:
  scaleTargetRef:
    name: {{ printf "%s-%s" .Release.Name .ServiceName }}
  minReplicaCount: {{ .MetricsConfig.minReplicas }}
  maxReplicaCount: {{ .MetricsConfig.maxReplicas }}
  pollingInterval: {{ .MetricsConfig.pollingInterval }}
  cooldownPeriod: {{ .MetricsConfig.cooldownPeriod }}
  advanced:
    horizontalPodAutoscalerConfig:
      behavior:
        # Conservative scaling: slow ramp-up (min of 50% / 2 pods per 2 min)
        # and slower scale-down (min of 10% / 1 pod per 3 min).
        scaleUp:
          stabilizationWindowSeconds: 300
          policies:
            - type: Percent
              value: 50
              periodSeconds: 120
            - type: Pods
              value: 2
              periodSeconds: 120
          selectPolicy: Min
        scaleDown:
          stabilizationWindowSeconds: 600
          policies:
            - type: Percent
              value: 10
              periodSeconds: 180
            - type: Pods
              value: 1
              periodSeconds: 180
          selectPolicy: Min
  triggers:
{{- /* Inside range, "$" is the dict passed to this template, so $.Release and
       $.ServiceName still resolve. Each trigger polls the service's own
       /metrics/queue-size endpoint. */}}
{{- range .MetricsConfig.triggers }}
    - type: metrics-api
      metadata:
        targetValue: {{ .threshold | quote }}
        url: http://{{ printf "%s-%s" $.Release.Name $.ServiceName }}:{{ .port }}/metrics/queue-size
        valueLocation: 'queueSize'
        method: 'GET'
      # authenticationRef:
      #   name: {{ printf "%s-%s-trigger-auth" $.Release.Name $.ServiceName }}
{{- end }}
---
{{- /* NOTE(review): authenticationRef above is commented out, so this
       TriggerAuthentication is currently emitted but unused — confirm before
       relying on authenticated metrics endpoints. */}}
apiVersion: keda.sh/v1alpha1
kind: TriggerAuthentication
metadata:
  name: {{ printf "%s-%s-trigger-auth" .Release.Name .ServiceName }}
  namespace: {{ .Release.Namespace }}
  labels:
    app: {{ printf "%s-%s" .Release.Name .ServiceName }}
    app.kubernetes.io/part-of: oneuptime
    app.kubernetes.io/managed-by: Helm
    appname: oneuptime
spec:
  secretTargetRef:
{{- /* Prefer an operator-provided existing secret; otherwise fall back to the
       chart-managed "<release>-secrets" secret. */}}
{{- if .Values.externalSecrets.oneuptimeSecret.existingSecret.name }}
    - parameter: clusterkey
      name: {{ .Values.externalSecrets.oneuptimeSecret.existingSecret.name }}
      key: {{ .Values.externalSecrets.oneuptimeSecret.existingSecret.passwordKey }}
{{- else }}
    - parameter: clusterkey
      name: {{ printf "%s-%s" .Release.Name "secrets" }}
      key: oneuptime-secret
{{- end }}
{{- end }}
{{- end }}

View File

@@ -1,12 +1,12 @@
# OneUptime accounts Deployment
{{- $accountsEnv := dict "PORT" $.Values.port.accounts "DISABLE_TELEMETRY" $.Values.accounts.disableTelemetryCollection -}}
{{- $accountsPorts := dict "port" $.Values.port.accounts -}}
{{- $accountsDeploymentArgs :=dict "IsUI" true "ServiceName" "accounts" "Ports" $accountsPorts "Release" $.Release "Values" $.Values "Env" $accountsEnv "Resources" $.Values.accounts.resources "DisableAutoscaler" $.Values.accounts.disableAutoscaler -}}
{{- $accountsEnv := dict "PORT" $.Values.accounts.ports.http "DISABLE_TELEMETRY" $.Values.accounts.disableTelemetryCollection -}}
{{- $accountsPorts := $.Values.accounts.ports -}}
{{- $accountsDeploymentArgs :=dict "IsUI" true "ServiceName" "accounts" "Ports" $accountsPorts "Release" $.Release "Values" $.Values "Env" $accountsEnv "Resources" $.Values.accounts.resources "DisableAutoscaler" $.Values.accounts.disableAutoscaler "ReplicaCount" $.Values.accounts.replicaCount -}}
{{- include "oneuptime.deployment" $accountsDeploymentArgs }}
---
# OneUptime accounts Service
{{- $accountsPorts := dict "port" $.Values.port.accounts -}}
{{- $accountsPorts := $.Values.accounts.ports -}}
{{- $accountsServiceArgs := dict "ServiceName" "accounts" "Ports" $accountsPorts "Release" $.Release "Values" $.Values -}}
{{- include "oneuptime.service" $accountsServiceArgs }}
---

View File

@@ -1,12 +1,12 @@
# OneUptime adminDashboard Deployment
{{- $adminDashboardEnv := dict "PORT" $.Values.port.adminDashboard "DISABLE_TELEMETRY" $.Values.adminDashboard.disableTelemetryCollection -}}
{{- $adminDashboardPorts := dict "port" $.Values.port.adminDashboard -}}
{{- $adminDashboardDeploymentArgs :=dict "IsUI" true "ServiceName" "admin-dashboard" "Ports" $adminDashboardPorts "Release" $.Release "Values" $.Values "Env" $adminDashboardEnv "Resources" $.Values.adminDashboard.resources "DisableAutoscaler" $.Values.adminDashboard.disableAutoscaler -}}
# OneUptime admin-dashboard Deployment
{{- $adminDashboardEnv := dict "PORT" $.Values.adminDashboard.ports.http "DISABLE_TELEMETRY" $.Values.adminDashboard.disableTelemetryCollection -}}
{{- $adminDashboardPorts := $.Values.adminDashboard.ports -}}
{{- $adminDashboardDeploymentArgs :=dict "IsUI" true "ServiceName" "admin-dashboard" "Ports" $adminDashboardPorts "Release" $.Release "Values" $.Values "Env" $adminDashboardEnv "Resources" $.Values.adminDashboard.resources "DisableAutoscaler" $.Values.adminDashboard.disableAutoscaler "ReplicaCount" $.Values.adminDashboard.replicaCount -}}
{{- include "oneuptime.deployment" $adminDashboardDeploymentArgs }}
---
# OneUptime adminDashboard Service
{{- $adminDashboardPorts := dict "port" $.Values.port.adminDashboard -}}
# OneUptime admin-dashboard Service
{{- $adminDashboardPorts := $.Values.adminDashboard.ports -}}
{{- $adminDashboardServiceArgs := dict "ServiceName" "admin-dashboard" "Ports" $adminDashboardPorts "Release" $.Release "Values" $.Values -}}
{{- include "oneuptime.service" $adminDashboardServiceArgs }}
---

View File

@@ -14,9 +14,13 @@ spec:
selector:
matchLabels:
app: {{ printf "%s-%s" $.Release.Name "api-reference" }}
{{- if $.Values.apiReference.replicaCount }}
replicas: {{ $.Values.apiReference.replicaCount }}
{{- else }}
{{- if or (not $.Values.autoscaling.enabled) ($.Values.apiReference.disableAutoscaler) }}
replicas: {{ $.Values.deployment.replicaCount }}
{{- end }}
{{- end }}
template:
metadata:
labels:
@@ -48,7 +52,7 @@ spec:
startupProbe:
httpGet:
path: /status/live
port: {{ $.Values.port.apiReference }}
port: {{ $.Values.apiReference.ports.http }}
periodSeconds: {{ $.Values.startupProbe.periodSeconds }}
failureThreshold: {{ $.Values.startupProbe.failureThreshold }}
{{- end }}
@@ -57,7 +61,7 @@ spec:
livenessProbe:
httpGet:
path: /status/live
port: {{ $.Values.port.apiReference }}
port: {{ $.Values.apiReference.ports.http }}
periodSeconds: {{ $.Values.livenessProbe.periodSeconds }}
timeoutSeconds: {{ $.Values.livenessProbe.timeoutSeconds }}
initialDelaySeconds: {{ $.Values.livenessProbe.initialDelaySeconds }}
@@ -67,7 +71,7 @@ spec:
readinessProbe:
httpGet:
path: /status/ready
port: {{ $.Values.port.apiReference }}
port: {{ $.Values.apiReference.ports.http }}
periodSeconds: {{ $.Values.readinessProbe.periodSeconds }}
initialDelaySeconds: {{ $.Values.readinessProbe.initialDelaySeconds }}
timeoutSeconds: {{ $.Values.readinessProbe.timeoutSeconds }}
@@ -81,11 +85,11 @@ spec:
{{- include "oneuptime.env.commonServer" . | nindent 12 }}
{{- include "oneuptime.env.oneuptimeSecret" . | nindent 12 }}
- name: PORT
value: {{ $.Values.port.apiReference | quote }}
value: {{ $.Values.apiReference.ports.http | quote }}
- name: DISABLE_TELEMETRY
value: {{ $.Values.apiReference.disableTelemetryCollection | quote }}
ports:
- containerPort: {{ $.Values.port.apiReference }}
- containerPort: {{ $.Values.apiReference.ports.http }}
protocol: TCP
name: http
{{- if $.Values.apiReference.resources }}
@@ -97,7 +101,8 @@ spec:
---
# OneUptime app Service
{{- $apiReferencePorts := dict "port" $.Values.port.apiReference -}}
# OneUptime apiReference Service
{{- $apiReferencePorts := dict "port" $.Values.apiReference.ports.http -}}
{{- $apiReferenceServiceArgs := dict "ServiceName" "api-reference" "Ports" $apiReferencePorts "Release" $.Release "Values" $.Values -}}
{{- include "oneuptime.service" $apiReferenceServiceArgs }}
---

View File

@@ -14,9 +14,13 @@ spec:
selector:
matchLabels:
app: {{ printf "%s-%s" $.Release.Name "app" }}
{{- if $.Values.app.replicaCount }}
replicas: {{ $.Values.app.replicaCount }}
{{- else }}
{{- if or (not $.Values.autoscaling.enabled) ($.Values.app.disableAutoscaler) }}
replicas: {{ $.Values.deployment.replicaCount }}
{{- end }}
{{- end }}
template:
metadata:
labels:
@@ -49,7 +53,7 @@ spec:
startupProbe:
httpGet:
path: /status/live
port: {{ $.Values.port.app }}
port: {{ $.Values.app.ports.http }}
periodSeconds: {{ $.Values.startupProbe.periodSeconds }}
failureThreshold: {{ $.Values.startupProbe.failureThreshold }}
{{- end }}
@@ -58,7 +62,7 @@ spec:
livenessProbe:
httpGet:
path: /status/live
port: {{ $.Values.port.app }}
port: {{ $.Values.app.ports.http }}
periodSeconds: {{ $.Values.livenessProbe.periodSeconds }}
timeoutSeconds: {{ $.Values.livenessProbe.timeoutSeconds }}
initialDelaySeconds: {{ $.Values.livenessProbe.initialDelaySeconds }}
@@ -68,7 +72,7 @@ spec:
readinessProbe:
httpGet:
path: /status/ready
port: {{ $.Values.port.app }}
port: {{ $.Values.app.ports.http }}
periodSeconds: {{ $.Values.readinessProbe.periodSeconds }}
initialDelaySeconds: {{ $.Values.readinessProbe.initialDelaySeconds }}
timeoutSeconds: {{ $.Values.readinessProbe.timeoutSeconds }}
@@ -82,7 +86,7 @@ spec:
{{- include "oneuptime.env.commonServer" . | nindent 12 }}
{{- include "oneuptime.env.oneuptimeSecret" . | nindent 12 }}
- name: PORT
value: {{ $.Values.port.app | quote }}
value: {{ $.Values.app.ports.http | quote }}
- name: SMS_HIGH_RISK_COST_IN_CENTS
value: {{ $.Values.billing.smsHighRiskValueInCents | quote }}
- name: CALL_HIGH_RISK_COST_IN_CENTS_PER_MINUTE
@@ -95,7 +99,7 @@ spec:
value: {{ $.Values.app.disableTelemetryCollection | quote }}
ports:
- containerPort: {{ $.Values.port.app }}
- containerPort: {{ $.Values.app.ports.http }}
protocol: TCP
name: http
{{- if $.Values.app.resources }}
@@ -107,7 +111,7 @@ spec:
---
# OneUptime app Service
{{- $appPorts := dict "port" $.Values.port.app -}}
{{- $appPorts := dict "port" $.Values.app.ports.http -}}
{{- $appServiceArgs := dict "ServiceName" "app" "Ports" $appPorts "Release" $.Release "Values" $.Values -}}
{{- include "oneuptime.service" $appServiceArgs }}
---

View File

@@ -1,12 +1,12 @@
# OneUptime dashboard Deployment
{{- $dashboardPorts := dict "port" $.Values.port.dashboard -}}
{{- $dashboardEnv := dict "PORT" $.Values.port.dashboard "DISABLE_TELEMETRY" $.Values.dashboard.disableTelemetryCollection -}}
{{- $dashboardDeploymentArgs :=dict "IsUI" true "ServiceName" "dashboard" "Ports" $dashboardPorts "Release" $.Release "Values" $.Values "Env" $dashboardEnv "Resources" $.Values.dashboard.resources "DisableAutoscaler" $.Values.dashboard.disableAutoscaler -}}
{{- $dashboardPorts := $.Values.dashboard.ports -}}
{{- $dashboardEnv := dict "PORT" $.Values.dashboard.ports.http "DISABLE_TELEMETRY" $.Values.dashboard.disableTelemetryCollection -}}
{{- $dashboardDeploymentArgs :=dict "IsUI" true "ServiceName" "dashboard" "Ports" $dashboardPorts "Release" $.Release "Values" $.Values "Env" $dashboardEnv "Resources" $.Values.dashboard.resources "DisableAutoscaler" $.Values.dashboard.disableAutoscaler "ReplicaCount" $.Values.dashboard.replicaCount -}}
{{- include "oneuptime.deployment" $dashboardDeploymentArgs }}
---
# OneUptime dashboard Service
{{- $dashboardPorts := dict "port" $.Values.port.dashboard -}}
{{- $dashboardPorts := $.Values.dashboard.ports -}}
{{- $dashboardServiceArgs := dict "ServiceName" "dashboard" "Ports" $dashboardPorts "Release" $.Release "Values" $.Values -}}
{{- include "oneuptime.service" $dashboardServiceArgs }}
---

View File

@@ -14,9 +14,13 @@ spec:
selector:
matchLabels:
app: {{ printf "%s-%s" $.Release.Name "docs" }}
{{- if $.Values.docs.replicaCount }}
replicas: {{ $.Values.docs.replicaCount }}
{{- else }}
{{- if or (not $.Values.autoscaling.enabled) ($.Values.docs.disableAutoscaler) }}
replicas: {{ $.Values.deployment.replicaCount }}
{{- end }}
{{- end }}
template:
metadata:
labels:
@@ -48,7 +52,7 @@ spec:
startupProbe:
httpGet:
path: /status/live
port: {{ $.Values.port.docs }}
port: {{ $.Values.docs.ports.http }}
periodSeconds: {{ $.Values.startupProbe.periodSeconds }}
failureThreshold: {{ $.Values.startupProbe.failureThreshold }}
{{- end }}
@@ -57,7 +61,7 @@ spec:
livenessProbe:
httpGet:
path: /status/live
port: {{ $.Values.port.docs }}
port: {{ $.Values.docs.ports.http }}
periodSeconds: {{ $.Values.livenessProbe.periodSeconds }}
timeoutSeconds: {{ $.Values.livenessProbe.timeoutSeconds }}
initialDelaySeconds: {{ $.Values.livenessProbe.initialDelaySeconds }}
@@ -67,7 +71,7 @@ spec:
readinessProbe:
httpGet:
path: /status/ready
port: {{ $.Values.port.docs }}
port: {{ $.Values.docs.ports.http }}
periodSeconds: {{ $.Values.readinessProbe.periodSeconds }}
initialDelaySeconds: {{ $.Values.readinessProbe.initialDelaySeconds }}
timeoutSeconds: {{ $.Values.readinessProbe.timeoutSeconds }}
@@ -81,11 +85,11 @@ spec:
{{- include "oneuptime.env.commonServer" . | nindent 12 }}
{{- include "oneuptime.env.oneuptimeSecret" . | nindent 12 }}
- name: PORT
value: {{ $.Values.port.docs | quote }}
value: {{ $.Values.docs.ports.http | quote }}
- name: DISABLE_TELEMETRY
value: {{ $.Values.docs.disableTelemetryCollection | quote }}
ports:
- containerPort: {{ $.Values.port.docs }}
- containerPort: {{ $.Values.docs.ports.http }}
protocol: TCP
name: http
{{- if $.Values.docs.resources }}
@@ -97,7 +101,8 @@ spec:
---
# OneUptime app Service
{{- $docsPorts := dict "port" $.Values.port.docs -}}
# OneUptime docs Service
{{- $docsPorts := dict "port" $.Values.docs.ports.http -}}
{{- $docsServiceArgs := dict "ServiceName" "docs" "Ports" $docsPorts "Release" $.Release "Values" $.Values -}}
{{- include "oneuptime.service" $docsServiceArgs }}
---

View File

@@ -15,8 +15,8 @@ spec:
selector:
matchLabels:
app: {{ printf "%s-%s" $.Release.Name "fluent-ingest" }}
{{- if $.Values.deployment.fluentIngest.replicaCount }}
replicas: {{ $.Values.deployment.fluentIngest.replicaCount }}
{{- if $.Values.fluentIngest.replicaCount }}
replicas: {{ $.Values.fluentIngest.replicaCount }}
{{- else }}
{{- if or (not $.Values.autoscaling.enabled) ($.Values.fluentIngest.disableAutoscaler) }}
replicas: {{ $.Values.deployment.replicaCount }}
@@ -57,7 +57,7 @@ spec:
startupProbe:
httpGet:
path: /status/live
port: {{ $.Values.port.fluentIngest }}
port: {{ $.Values.fluentIngest.ports.http }}
periodSeconds: {{ $.Values.startupProbe.periodSeconds }}
failureThreshold: {{ $.Values.startupProbe.failureThreshold }}
{{- end }}
@@ -66,7 +66,7 @@ spec:
livenessProbe:
httpGet:
path: /status/live
port: {{ $.Values.port.fluentIngest }}
port: {{ $.Values.fluentIngest.ports.http }}
periodSeconds: {{ $.Values.livenessProbe.periodSeconds }}
timeoutSeconds: {{ $.Values.livenessProbe.timeoutSeconds }}
initialDelaySeconds: {{ $.Values.livenessProbe.initialDelaySeconds }}
@@ -76,7 +76,7 @@ spec:
readinessProbe:
httpGet:
path: /status/ready
port: {{ $.Values.port.fluentIngest }}
port: {{ $.Values.fluentIngest.ports.http }}
periodSeconds: {{ $.Values.readinessProbe.periodSeconds }}
initialDelaySeconds: {{ $.Values.readinessProbe.initialDelaySeconds }}
timeoutSeconds: {{ $.Values.readinessProbe.timeoutSeconds }}
@@ -90,11 +90,11 @@ spec:
{{- include "oneuptime.env.commonServer" . | nindent 12 }}
{{- include "oneuptime.env.oneuptimeSecret" . | nindent 12 }}
- name: PORT
value: {{ $.Values.port.fluentIngest | quote }}
value: {{ $.Values.fluentIngest.ports.http | quote }}
- name: DISABLE_TELEMETRY
value: {{ $.Values.fluentIngest.disableTelemetryCollection | quote }}
ports:
- containerPort: {{ $.Values.port.fluentIngest }}
- containerPort: {{ $.Values.fluentIngest.ports.http }}
protocol: TCP
name: http
{{- if $.Values.fluentIngest.resources }}
@@ -106,13 +106,13 @@ spec:
---
# OneUptime fluent-ingest Service
{{- $fluentIngestPorts := dict "port" $.Values.port.fluentIngest -}}
{{- $fluentIngestPorts := dict "port" $.Values.fluentIngest.ports.http -}}
{{- $fluentIngestServiceArgs := dict "ServiceName" "fluent-ingest" "Ports" $fluentIngestPorts "Release" $.Release "Values" $.Values -}}
{{- include "oneuptime.service" $fluentIngestServiceArgs }}
---
# OneUptime fluent-ingest autoscaler
{{- if not $.Values.fluentIngest.disableAutoscaler }}
{{- if and (not $.Values.fluentIngest.disableAutoscaler) (not (and $.Values.keda.enabled $.Values.fluentIngest.keda.enabled)) }}
{{- $fluentIngestAutoScalerArgs := dict "ServiceName" "fluent-ingest" "Release" $.Release "Values" $.Values -}}
{{- include "oneuptime.autoscaler" $fluentIngestAutoScalerArgs }}
{{- end }}

View File

@@ -14,9 +14,13 @@ spec:
selector:
matchLabels:
app: {{ printf "%s-%s" $.Release.Name "home" }}
{{- if $.Values.home.replicaCount }}
replicas: {{ $.Values.home.replicaCount }}
{{- else }}
{{- if or (not $.Values.autoscaling.enabled) ($.Values.home.disableAutoscaler) }}
replicas: {{ $.Values.deployment.replicaCount }}
{{- end }}
{{- end }}
template:
metadata:
labels:
@@ -48,7 +52,7 @@ spec:
startupProbe:
httpGet:
path: /status/live
port: {{ $.Values.port.home }}
port: {{ $.Values.home.ports.http }}
periodSeconds: {{ $.Values.startupProbe.periodSeconds }}
failureThreshold: {{ $.Values.startupProbe.failureThreshold }}
{{- end }}
@@ -57,7 +61,7 @@ spec:
livenessProbe:
httpGet:
path: /status/live
port: {{ $.Values.port.home }}
port: {{ $.Values.home.ports.http }}
periodSeconds: {{ $.Values.livenessProbe.periodSeconds }}
timeoutSeconds: {{ $.Values.livenessProbe.timeoutSeconds }}
initialDelaySeconds: {{ $.Values.livenessProbe.initialDelaySeconds }}
@@ -67,7 +71,7 @@ spec:
readinessProbe:
httpGet:
path: /status/ready
port: {{ $.Values.port.home }}
port: {{ $.Values.home.ports.http }}
periodSeconds: {{ $.Values.readinessProbe.periodSeconds }}
initialDelaySeconds: {{ $.Values.readinessProbe.initialDelaySeconds }}
timeoutSeconds: {{ $.Values.readinessProbe.timeoutSeconds }}
@@ -81,11 +85,11 @@ spec:
{{- include "oneuptime.env.commonServer" . | nindent 12 }}
{{- include "oneuptime.env.oneuptimeSecret" . | nindent 12 }}
- name: PORT
value: {{ $.Values.port.home | quote }}
value: {{ $.Values.home.ports.http | quote }}
- name: DISABLE_TELEMETRY
value: {{ $.Values.home.disableTelemetryCollection | quote }}
ports:
- containerPort: {{ $.Values.port.home }}
- containerPort: {{ $.Values.home.ports.http }}
protocol: TCP
name: http
{{- if $.Values.home.resources }}
@@ -97,7 +101,7 @@ spec:
---
# OneUptime app Service
{{- $homePorts := dict "port" $.Values.port.home -}}
{{- $homePorts := $.Values.home.ports -}}
{{- $homeServiceArgs := dict "ServiceName" "home" "Ports" $homePorts "Release" $.Release "Values" $.Values -}}
{{- include "oneuptime.service" $homeServiceArgs }}
---

View File

@@ -15,8 +15,8 @@ spec:
selector:
matchLabels:
app: {{ printf "%s-%s" $.Release.Name "incoming-request-ingest" }}
{{- if $.Values.deployment.incomingRequestIngest.replicaCount }}
replicas: {{ $.Values.deployment.incomingRequestIngest.replicaCount }}
{{- if $.Values.incomingRequestIngest.replicaCount }}
replicas: {{ $.Values.incomingRequestIngest.replicaCount }}
{{- else }}
{{- if or (not $.Values.autoscaling.enabled) ($.Values.incomingRequestIngest.disableAutoscaler) }}
replicas: {{ $.Values.deployment.replicaCount }}
@@ -57,7 +57,7 @@ spec:
startupProbe:
httpGet:
path: /status/live
port: {{ $.Values.port.incomingRequestIngest }}
port: {{ $.Values.incomingRequestIngest.ports.http }}
periodSeconds: {{ $.Values.startupProbe.periodSeconds }}
failureThreshold: {{ $.Values.startupProbe.failureThreshold }}
{{- end }}
@@ -66,7 +66,7 @@ spec:
livenessProbe:
httpGet:
path: /status/live
port: {{ $.Values.port.incomingRequestIngest }}
port: {{ $.Values.incomingRequestIngest.ports.http }}
periodSeconds: {{ $.Values.livenessProbe.periodSeconds }}
timeoutSeconds: {{ $.Values.livenessProbe.timeoutSeconds }}
initialDelaySeconds: {{ $.Values.livenessProbe.initialDelaySeconds }}
@@ -76,7 +76,7 @@ spec:
readinessProbe:
httpGet:
path: /status/ready
port: {{ $.Values.port.incomingRequestIngest }}
port: {{ $.Values.incomingRequestIngest.ports.http }}
periodSeconds: {{ $.Values.readinessProbe.periodSeconds }}
initialDelaySeconds: {{ $.Values.readinessProbe.initialDelaySeconds }}
timeoutSeconds: {{ $.Values.readinessProbe.timeoutSeconds }}
@@ -90,11 +90,11 @@ spec:
{{- include "oneuptime.env.commonServer" . | nindent 12 }}
{{- include "oneuptime.env.oneuptimeSecret" . | nindent 12 }}
- name: PORT
value: {{ $.Values.port.incomingRequestIngest | quote }}
value: {{ $.Values.incomingRequestIngest.ports.http | quote }}
- name: DISABLE_TELEMETRY
value: {{ $.Values.incomingRequestIngest.disableTelemetryCollection | quote }}
ports:
- containerPort: {{ $.Values.port.incomingRequestIngest }}
- containerPort: {{ $.Values.incomingRequestIngest.ports.http }}
protocol: TCP
name: http
{{- if $.Values.incomingRequestIngest.resources }}
@@ -106,13 +106,13 @@ spec:
---
# OneUptime incoming-request-ingest Service
{{- $incomingRequestIngestPorts := dict "port" $.Values.port.incomingRequestIngest -}}
{{- $incomingRequestIngestPorts := dict "port" $.Values.incomingRequestIngest.ports.http -}}
{{- $incomingRequestIngestServiceArgs := dict "ServiceName" "incoming-request-ingest" "Ports" $incomingRequestIngestPorts "Release" $.Release "Values" $.Values -}}
{{- include "oneuptime.service" $incomingRequestIngestServiceArgs }}
---
# OneUptime incoming-request-ingest autoscaler
{{- if not $.Values.incomingRequestIngest.disableAutoscaler }}
{{- if and (not $.Values.incomingRequestIngest.disableAutoscaler) (not (and $.Values.keda.enabled $.Values.incomingRequestIngest.keda.enabled)) }}
{{- $incomingRequestIngestAutoScalerArgs := dict "ServiceName" "incoming-request-ingest" "Release" $.Release "Values" $.Values -}}
{{- include "oneuptime.autoscaler" $incomingRequestIngestAutoScalerArgs }}
{{- end }}

View File

@@ -15,9 +15,13 @@ spec:
selector:
matchLabels:
app: {{ printf "%s-%s" $.Release.Name "isolated-vm" }}
{{- if $.Values.isolatedVM.replicaCount }}
replicas: {{ $.Values.isolatedVM.replicaCount }}
{{- else }}
{{- if or (not $.Values.autoscaling.enabled) ($.Values.isolatedVM.disableAutoscaler) }}
replicas: {{ $.Values.deployment.replicaCount }}
{{- end }}
{{- end }}
template:
metadata:
labels:
@@ -52,12 +56,12 @@ spec:
{{- include "oneuptime.env.common" . | nindent 12 }}
{{- include "oneuptime.env.oneuptimeSecret" . | nindent 12 }}
- name: PORT
value: {{ $.Values.port.isolatedVM | quote }}
value: {{ $.Values.isolatedVM.ports.http | quote }}
- name: DISABLE_TELEMETRY
value: {{ $.Values.isolatedVM.disableTelemetryCollection | quote }}
ports:
- containerPort: {{ $.Values.port.isolatedVM }}
- containerPort: {{ $.Values.isolatedVM.ports.http }}
protocol: TCP
name: http
{{- if $.Values.isolatedVM.resources }}
@@ -69,7 +73,7 @@ spec:
---
# OneUptime isolatedVM Service
{{- $isolatedVMPorts := dict "port" $.Values.port.isolatedVM -}}
{{- $isolatedVMPorts := $.Values.isolatedVM.ports -}}
{{- $isolatedVMServiceArgs := dict "ServiceName" "isolated-vm" "Ports" $isolatedVMPorts "Release" $.Release "Values" $.Values -}}
{{- include "oneuptime.service" $isolatedVMServiceArgs }}
---

View File

@@ -0,0 +1,52 @@
{{/*
KEDA ScaledObjects for various services
*/}}
{{- /* Each block below renders only when global keda.enabled, the service's own
       keda.enabled flag, and its autoscaler (disableAutoscaler unset) all allow
       it; the same guard is re-checked inside oneuptime.kedaScaledObject. */}}
{{/* OpenTelemetry Ingest KEDA ScaledObject */}}
{{- if and .Values.keda.enabled .Values.openTelemetryIngest.keda.enabled (not .Values.openTelemetryIngest.disableAutoscaler) }}
{{- $metricsConfig := dict "enabled" .Values.openTelemetryIngest.keda.enabled "minReplicas" .Values.openTelemetryIngest.keda.minReplicas "maxReplicas" .Values.openTelemetryIngest.keda.maxReplicas "pollingInterval" .Values.openTelemetryIngest.keda.pollingInterval "cooldownPeriod" .Values.openTelemetryIngest.keda.cooldownPeriod "triggers" (list (dict "query" "oneuptime_telemetry_queue_size" "threshold" .Values.openTelemetryIngest.keda.queueSizeThreshold "port" .Values.openTelemetryIngest.ports.http)) }}
{{- $openTelemetryIngestKedaArgs := dict "ServiceName" "open-telemetry-ingest" "Release" .Release "Values" .Values "MetricsConfig" $metricsConfig "DisableAutoscaler" .Values.openTelemetryIngest.disableAutoscaler }}
{{- include "oneuptime.kedaScaledObject" $openTelemetryIngestKedaArgs }}
{{- end }}
{{/* Fluent Ingest KEDA ScaledObject */}}
{{- if and .Values.keda.enabled .Values.fluentIngest.keda.enabled (not .Values.fluentIngest.disableAutoscaler) }}
{{- $metricsConfig := dict "enabled" .Values.fluentIngest.keda.enabled "minReplicas" .Values.fluentIngest.keda.minReplicas "maxReplicas" .Values.fluentIngest.keda.maxReplicas "pollingInterval" .Values.fluentIngest.keda.pollingInterval "cooldownPeriod" .Values.fluentIngest.keda.cooldownPeriod "triggers" (list (dict "query" "oneuptime_fluent_ingest_queue_size" "threshold" .Values.fluentIngest.keda.queueSizeThreshold "port" .Values.fluentIngest.ports.http)) }}
{{- $fluentIngestKedaArgs := dict "ServiceName" "fluent-ingest" "Release" .Release "Values" .Values "MetricsConfig" $metricsConfig "DisableAutoscaler" .Values.fluentIngest.disableAutoscaler }}
{{- include "oneuptime.kedaScaledObject" $fluentIngestKedaArgs }}
{{- end }}
{{/* Incoming Request Ingest KEDA ScaledObject */}}
{{- if and .Values.keda.enabled .Values.incomingRequestIngest.keda.enabled (not .Values.incomingRequestIngest.disableAutoscaler) }}
{{- $metricsConfig := dict "enabled" .Values.incomingRequestIngest.keda.enabled "minReplicas" .Values.incomingRequestIngest.keda.minReplicas "maxReplicas" .Values.incomingRequestIngest.keda.maxReplicas "pollingInterval" .Values.incomingRequestIngest.keda.pollingInterval "cooldownPeriod" .Values.incomingRequestIngest.keda.cooldownPeriod "triggers" (list (dict "query" "oneuptime_incoming_request_ingest_queue_size" "threshold" .Values.incomingRequestIngest.keda.queueSizeThreshold "port" .Values.incomingRequestIngest.ports.http)) }}
{{- $incomingRequestIngestKedaArgs := dict "ServiceName" "incoming-request-ingest" "Release" .Release "Values" .Values "MetricsConfig" $metricsConfig "DisableAutoscaler" .Values.incomingRequestIngest.disableAutoscaler }}
{{- include "oneuptime.kedaScaledObject" $incomingRequestIngestKedaArgs }}
{{- end }}
{{/* Server Monitor Ingest KEDA ScaledObject */}}
{{- if and .Values.keda.enabled .Values.serverMonitorIngest.keda.enabled (not .Values.serverMonitorIngest.disableAutoscaler) }}
{{- $metricsConfig := dict "enabled" .Values.serverMonitorIngest.keda.enabled "minReplicas" .Values.serverMonitorIngest.keda.minReplicas "maxReplicas" .Values.serverMonitorIngest.keda.maxReplicas "pollingInterval" .Values.serverMonitorIngest.keda.pollingInterval "cooldownPeriod" .Values.serverMonitorIngest.keda.cooldownPeriod "triggers" (list (dict "query" "oneuptime_server_monitor_ingest_queue_size" "threshold" .Values.serverMonitorIngest.keda.queueSizeThreshold "port" .Values.serverMonitorIngest.ports.http)) }}
{{- $serverMonitorIngestKedaArgs := dict "ServiceName" "server-monitor-ingest" "Release" .Release "Values" .Values "MetricsConfig" $metricsConfig "DisableAutoscaler" .Values.serverMonitorIngest.disableAutoscaler }}
{{- include "oneuptime.kedaScaledObject" $serverMonitorIngestKedaArgs }}
{{- end }}
{{/* Probe Ingest KEDA ScaledObject */}}
{{- if and .Values.keda.enabled .Values.probeIngest.keda.enabled (not .Values.probeIngest.disableAutoscaler) }}
{{- $metricsConfig := dict "enabled" .Values.probeIngest.keda.enabled "minReplicas" .Values.probeIngest.keda.minReplicas "maxReplicas" .Values.probeIngest.keda.maxReplicas "pollingInterval" .Values.probeIngest.keda.pollingInterval "cooldownPeriod" .Values.probeIngest.keda.cooldownPeriod "triggers" (list (dict "query" "oneuptime_probe_ingest_queue_size" "threshold" .Values.probeIngest.keda.queueSizeThreshold "port" .Values.probeIngest.ports.http)) }}
{{- $probeIngestKedaArgs := dict "ServiceName" "probe-ingest" "Release" .Release "Values" .Values "MetricsConfig" $metricsConfig "DisableAutoscaler" .Values.probeIngest.disableAutoscaler }}
{{- include "oneuptime.kedaScaledObject" $probeIngestKedaArgs }}
{{- end }}
{{/* Probe KEDA ScaledObjects - one for each probe configuration */}}
{{- range $key, $val := $.Values.probes }}
{{- if and $.Values.keda.enabled $val.keda.enabled (not $val.disableAutoscaler) }}
{{- $serviceName := printf "probe-%s" $key }}
{{- /* Fallback port when the probe entry does not define ports.http —
       presumably the probe's default HTTP port; TODO confirm 3874 matches the
       probe chart/image default. */}}
{{- $probePort := 3874 }}
{{- if and $val.ports $val.ports.http }}
{{- $probePort = $val.ports.http }}
{{- end }}
{{- $metricsConfig := dict "enabled" $val.keda.enabled "minReplicas" $val.keda.minReplicas "maxReplicas" $val.keda.maxReplicas "pollingInterval" $val.keda.pollingInterval "cooldownPeriod" $val.keda.cooldownPeriod "triggers" (list (dict "query" "oneuptime_probe_queue_size" "threshold" $val.keda.queueSizeThreshold "port" $probePort)) }}
{{- $probeKedaArgs := dict "ServiceName" $serviceName "Release" $.Release "Values" $.Values "MetricsConfig" $metricsConfig "DisableAutoscaler" $val.disableAutoscaler }}
{{- include "oneuptime.kedaScaledObject" $probeKedaArgs }}
{{- end }}
{{- end }}

View File

@@ -15,9 +15,13 @@ spec:
selector:
matchLabels:
app: {{ printf "%s-%s" $.Release.Name "nginx" }}
{{- if $.Values.nginx.replicaCount }}
replicas: {{ $.Values.nginx.replicaCount }}
{{- else }}
{{- if or (not $.Values.autoscaling.enabled) ($.Values.nginx.disableAutoscaler) }}
replicas: {{ $.Values.deployment.replicaCount }}
{{- end }}
{{- end }}
template:
metadata:
labels:
@@ -108,7 +112,7 @@ spec:
- name: NGINX_LISTEN_OPTIONS
value: {{ $.Values.nginx.listenOptions | quote }}
- name: ONEUPTIME_HTTP_PORT
value: {{ $.Values.port.nginxHttp | quote }}
value: {{ $.Values.nginx.ports.http | quote }}
- name: PORT
value: "7851" # Port for the nodejs server for live and ready status
- name: DISABLE_TELEMETRY
@@ -154,10 +158,10 @@ spec:
{{- end }}
{{- end }}
ports:
- port: {{ $.Values.port.nginxHttp }}
- port: {{ $.Values.nginx.ports.http }}
targetPort: 7849
name: oneuptime-http
- port: {{ $.Values.port.statusPageHttpsPort }}
- port: {{ $.Values.nginx.ports.https }}
targetPort: 7850
name: statuspage-ssl
selector:

View File

@@ -15,8 +15,8 @@ spec:
selector:
matchLabels:
app: {{ printf "%s-%s" $.Release.Name "open-telemetry-ingest" }}
{{- if $.Values.deployment.openTelemetryIngest.replicaCount }}
replicas: {{ $.Values.deployment.openTelemetryIngest.replicaCount }}
{{- if $.Values.openTelemetryIngest.replicaCount }}
replicas: {{ $.Values.openTelemetryIngest.replicaCount }}
{{- else }}
{{- if or (not $.Values.autoscaling.enabled) ($.Values.openTelemetryIngest.disableAutoscaler) }}
replicas: {{ $.Values.deployment.replicaCount }}
@@ -57,7 +57,7 @@ spec:
startupProbe:
httpGet:
path: /status/live
port: {{ $.Values.port.openTelemetryIngest }}
port: {{ $.Values.openTelemetryIngest.ports.http }}
periodSeconds: {{ $.Values.startupProbe.periodSeconds }}
failureThreshold: {{ $.Values.startupProbe.failureThreshold }}
{{- end }}
@@ -66,7 +66,7 @@ spec:
livenessProbe:
httpGet:
path: /status/live
port: {{ $.Values.port.openTelemetryIngest }}
port: {{ $.Values.openTelemetryIngest.ports.http }}
periodSeconds: {{ $.Values.livenessProbe.periodSeconds }}
timeoutSeconds: {{ $.Values.livenessProbe.timeoutSeconds }}
initialDelaySeconds: {{ $.Values.livenessProbe.initialDelaySeconds }}
@@ -76,7 +76,7 @@ spec:
readinessProbe:
httpGet:
path: /status/ready
port: {{ $.Values.port.openTelemetryIngest }}
port: {{ $.Values.openTelemetryIngest.ports.http }}
periodSeconds: {{ $.Values.readinessProbe.periodSeconds }}
initialDelaySeconds: {{ $.Values.readinessProbe.initialDelaySeconds }}
timeoutSeconds: {{ $.Values.readinessProbe.timeoutSeconds }}
@@ -90,11 +90,11 @@ spec:
{{- include "oneuptime.env.commonServer" . | nindent 12 }}
{{- include "oneuptime.env.oneuptimeSecret" . | nindent 12 }}
- name: PORT
value: {{ $.Values.port.openTelemetryIngest | quote }}
value: {{ $.Values.openTelemetryIngest.ports.http | quote }}
- name: DISABLE_TELEMETRY
value: {{ $.Values.openTelemetryIngest.disableTelemetryCollection | quote }}
ports:
- containerPort: {{ $.Values.port.openTelemetryIngest }}
- containerPort: {{ $.Values.openTelemetryIngest.ports.http }}
protocol: TCP
name: http
{{- if $.Values.openTelemetryIngest.resources }}
@@ -106,14 +106,14 @@ spec:
---
# OneUptime open-telemetry-ingest Service
{{- $openTelemetryIngestPorts := dict "port" $.Values.port.openTelemetryIngest -}}
{{- $openTelemetryIngestPorts := dict "port" $.Values.openTelemetryIngest.ports.http -}}
{{- $openTelemetryIngestServiceArgs := dict "ServiceName" "open-telemetry-ingest" "Ports" $openTelemetryIngestPorts "Release" $.Release "Values" $.Values -}}
{{- include "oneuptime.service" $openTelemetryIngestServiceArgs }}
---
# OneUptime open-telemetry-ingest autoscaler
{{- if not $.Values.openTelemetryIngest.disableAutoscaler }}
{{- if and (not $.Values.openTelemetryIngest.disableAutoscaler) (not (and $.Values.keda.enabled $.Values.openTelemetryIngest.keda.enabled)) }}
{{- $openTelemetryIngestAutoScalerArgs := dict "ServiceName" "open-telemetry-ingest" "Release" $.Release "Values" $.Values -}}
{{- include "oneuptime.autoscaler" $openTelemetryIngestAutoScalerArgs }}
{{- end }}
---
---

View File

@@ -15,8 +15,8 @@ spec:
selector:
matchLabels:
app: {{ printf "%s-%s" $.Release.Name "otel-collector" }}
{{- if $.Values.deployment.otelCollector.replicaCount }}
replicas: {{ $.Values.deployment.otelCollector.replicaCount }}
{{- if $.Values.openTelemetryCollector.replicaCount }}
replicas: {{ $.Values.openTelemetryCollector.replicaCount }}
{{- else }}
{{- if or (not $.Values.autoscaling.enabled) ($.Values.openTelemetryCollector.disableAutoscaler) }}
replicas: {{ $.Values.deployment.replicaCount }}
@@ -91,7 +91,7 @@ spec:
{{- include "oneuptime.env.commonServer" . | nindent 12 }}
{{- include "oneuptime.env.oneuptimeSecret" . | nindent 12 }}
- name: PORT
value: {{ $.Values.port.otelCollectorGrpc | quote }}
value: {{ $.Values.openTelemetryCollector.ports.grpc | quote }}
- name: OPENTELEMETRY_COLLECTOR_SENDING_QUEUE_ENABLED
value: {{ $.Values.openTelemetryCollector.sendingQueue.enabled | quote }}
- name: OPENTELEMETRY_COLLECTOR_SENDING_QUEUE_NUM_CONSUMERS
@@ -101,10 +101,10 @@ spec:
- name: DISABLE_TELEMETRY
value: {{ $.Values.openTelemetryCollector.disableTelemetryCollection | quote }}
ports:
- containerPort: {{ $.Values.port.otelCollectorHttp }}
- containerPort: {{ $.Values.openTelemetryCollector.ports.http }}
protocol: TCP
name: http
- containerPort: {{ $.Values.port.otelCollectorGrpc }}
- containerPort: {{ $.Values.openTelemetryCollector.ports.grpc }}
protocol: TCP
name: grpc
{{- if $.Values.openTelemetryCollector.resources }}
@@ -115,7 +115,7 @@ spec:
---
# OneUptime otel-collector Service
{{- $otelCollectorPorts := dict "grpc" $.Values.port.otelCollectorGrpc "http" $.Values.port.otelCollectorHttp -}}
{{- $otelCollectorPorts := dict "grpc" $.Values.openTelemetryCollector.ports.grpc "http" $.Values.openTelemetryCollector.ports.http -}}
{{- $identityServiceArgs := dict "ServiceName" "otel-collector" "Ports" $otelCollectorPorts "Release" $.Release "Values" $.Values -}}
{{- include "oneuptime.service" $identityServiceArgs }}
---

View File

@@ -15,8 +15,8 @@ spec:
selector:
matchLabels:
app: {{ printf "%s-%s" $.Release.Name "probe-ingest" }}
{{- if $.Values.deployment.probeIngest.replicaCount }}
replicas: {{ $.Values.deployment.probeIngest.replicaCount }}
{{- if $.Values.probeIngest.replicaCount }}
replicas: {{ $.Values.probeIngest.replicaCount }}
{{- else }}
{{- if or (not $.Values.autoscaling.enabled) ($.Values.probeIngest.disableAutoscaler) }}
replicas: {{ $.Values.deployment.replicaCount }}
@@ -57,7 +57,7 @@ spec:
startupProbe:
httpGet:
path: /status/live
port: {{ $.Values.port.probeIngest }}
port: {{ $.Values.probeIngest.ports.http }}
periodSeconds: {{ $.Values.startupProbe.periodSeconds }}
failureThreshold: {{ $.Values.startupProbe.failureThreshold }}
{{- end }}
@@ -66,7 +66,7 @@ spec:
livenessProbe:
httpGet:
path: /status/live
port: {{ $.Values.port.probeIngest }}
port: {{ $.Values.probeIngest.ports.http }}
periodSeconds: {{ $.Values.livenessProbe.periodSeconds }}
timeoutSeconds: {{ $.Values.livenessProbe.timeoutSeconds }}
initialDelaySeconds: {{ $.Values.livenessProbe.initialDelaySeconds }}
@@ -76,7 +76,7 @@ spec:
readinessProbe:
httpGet:
path: /status/ready
port: {{ $.Values.port.probeIngest }}
port: {{ $.Values.probeIngest.ports.http }}
periodSeconds: {{ $.Values.readinessProbe.periodSeconds }}
initialDelaySeconds: {{ $.Values.readinessProbe.initialDelaySeconds }}
timeoutSeconds: {{ $.Values.readinessProbe.timeoutSeconds }}
@@ -90,11 +90,11 @@ spec:
{{- include "oneuptime.env.commonServer" . | nindent 12 }}
{{- include "oneuptime.env.oneuptimeSecret" . | nindent 12 }}
- name: PORT
value: {{ $.Values.port.probeIngest | quote }}
value: {{ $.Values.probeIngest.ports.http | quote }}
- name: DISABLE_TELEMETRY
value: {{ $.Values.probeIngest.disableTelemetryCollection | quote }}
ports:
- containerPort: {{ $.Values.port.probeIngest }}
- containerPort: {{ $.Values.probeIngest.ports.http }}
protocol: TCP
name: http
{{- if $.Values.probeIngest.resources }}
@@ -106,13 +106,13 @@ spec:
---
# OneUptime probe-ingest Service
{{- $probeIngestPorts := dict "port" $.Values.port.probeIngest -}}
{{- $probeIngestPorts := dict "port" $.Values.probeIngest.ports.http -}}
{{- $probeIngestServiceArgs := dict "ServiceName" "probe-ingest" "Ports" $probeIngestPorts "Release" $.Release "Values" $.Values -}}
{{- include "oneuptime.service" $probeIngestServiceArgs }}
---
# OneUptime probe-ingest autoscaler
{{- if not $.Values.probeIngest.disableAutoscaler }}
{{- if and (not $.Values.probeIngest.disableAutoscaler) (not (and $.Values.keda.enabled $.Values.probeIngest.keda.enabled)) }}
{{- $probeIngestAutoScalerArgs := dict "ServiceName" "probe-ingest" "Release" $.Release "Values" $.Values -}}
{{- include "oneuptime.autoscaler" $probeIngestAutoScalerArgs }}
{{- end }}

View File

@@ -59,13 +59,17 @@ spec:
- name: LOG_LEVEL
value: {{ $.Values.logLevel }}
- name: PORT
value: {{ $.Values.port.probe | squote }}
{{- if and $val.ports $val.ports.http }}
value: {{ $val.ports.http | squote }}
{{- else }}
value: "3874"
{{- end }}
- name: OPENTELEMETRY_EXPORTER_OTLP_HEADERS
value: {{ $.Values.openTelemetryExporter.headers }}
- name: OPENTELEMETRY_EXPORTER_OTLP_ENDPOINT
value: {{ $.Values.openTelemetryExporter.endpoint }}
- name: ONEUPTIME_URL
value: http://{{ $.Release.Name }}-probe-ingest.{{ $.Release.Namespace }}.svc.{{ $.Values.global.clusterDomain }}:{{ $.Values.port.probeIngest }}
value: http://{{ $.Release.Name }}-probe-ingest.{{ $.Release.Namespace }}.svc.{{ $.Values.global.clusterDomain }}:{{ $.Values.probeIngest.ports.http }}
- name: PROBE_NAME
value: {{ $val.name }}
- name: PROBE_DESCRIPTION
@@ -100,6 +104,10 @@ spec:
value: {{ $val.disableTelemetryCollection | quote }}
{{- end }}
{{- include "oneuptime.env.oneuptimeSecret" $ | nindent 12 }}
ports:
- containerPort: {{ if and $val.ports $val.ports.http }}{{ $val.ports.http }}{{ else }}3874{{ end }}
protocol: TCP
name: http
{{- if $val.resources }}
resources:
{{- toYaml $val.resources | nindent 12 }}
@@ -110,12 +118,22 @@ spec:
restartPolicy: {{ $.Values.image.restartPolicy }}
---
{{- if not $val.disableAutoscaler }}
# OneUptime probe Service
{{- $probePort := 3874 }}
{{- if and $val.ports $val.ports.http }}
{{- $probePort = $val.ports.http }}
{{- end }}
{{- $probePorts := dict "port" $probePort -}}
{{- $probeServiceArgs := dict "ServiceName" (printf "probe-%s" $key) "Ports" $probePorts "Release" $.Release "Values" $.Values -}}
{{- include "oneuptime.service" $probeServiceArgs }}
---
{{- if and (not $val.disableAutoscaler) (not (and $.Values.keda.enabled $val.keda.enabled)) }}
# OneUptime probe autoscaler
{{- $probeAutoScalerArgs := dict "ServiceName" (printf "probe-%s" $key) "Release" $.Release "Values" $.Values -}}
{{- include "oneuptime.autoscaler" $probeAutoScalerArgs }}
{{- end }}
---
{{- end }}
{{- end }}

View File

@@ -12,8 +12,16 @@ stringData:
## This is a workaround to keep the secrets unchanged
{{- if .Release.IsUpgrade }}
{{- if .Values.oneuptimeSecret }}
oneuptime-secret: {{ .Values.oneuptimeSecret | quote }}
{{- else }}
oneuptime-secret: {{ index (lookup "v1" "Secret" $.Release.Namespace (printf "%s-secrets" $.Release.Name)).data "oneuptime-secret" | b64dec }}
{{- end }}
{{- if .Values.encryptionSecret }}
encryption-secret: {{ .Values.encryptionSecret | quote }}
{{- else }}
encryption-secret: {{ index (lookup "v1" "Secret" $.Release.Namespace (printf "%s-secrets" $.Release.Name)).data "encryption-secret" | b64dec }}
{{- end }}
{{- range $key, $val := $.Values.probes }}
{{- if (index (lookup "v1" "Secret" $.Release.Namespace (printf "%s-secrets" $.Release.Name)).data (printf "probe-%s" $key)) }}
@@ -25,8 +33,16 @@ stringData:
{{ else }} # install operation
{{- if .Values.oneuptimeSecret }}
oneuptime-secret: {{ .Values.oneuptimeSecret | quote }}
{{- else }}
oneuptime-secret: {{ randAlphaNum 32 | quote }}
{{- end }}
{{- if .Values.encryptionSecret }}
encryption-secret: {{ .Values.encryptionSecret | quote }}
{{- else }}
encryption-secret: {{ randAlphaNum 32 | quote }}
{{- end }}
{{- range $key, $val := $.Values.probes }}
{{printf "probe-%s" $key}}: {{ randAlphaNum 32 | quote }}

View File

@@ -15,8 +15,8 @@ spec:
selector:
matchLabels:
app: {{ printf "%s-%s" $.Release.Name "server-monitor-ingest" }}
{{- if $.Values.deployment.serverMonitorIngest.replicaCount }}
replicas: {{ $.Values.deployment.serverMonitorIngest.replicaCount }}
{{- if $.Values.serverMonitorIngest.replicaCount }}
replicas: {{ $.Values.serverMonitorIngest.replicaCount }}
{{- else }}
{{- if or (not $.Values.autoscaling.enabled) ($.Values.serverMonitorIngest.disableAutoscaler) }}
replicas: {{ $.Values.deployment.replicaCount }}
@@ -57,7 +57,7 @@ spec:
startupProbe:
httpGet:
path: /status/live
port: {{ $.Values.port.serverMonitorIngest }}
port: {{ $.Values.serverMonitorIngest.ports.http }}
periodSeconds: {{ $.Values.startupProbe.periodSeconds }}
failureThreshold: {{ $.Values.startupProbe.failureThreshold }}
{{- end }}
@@ -66,7 +66,7 @@ spec:
livenessProbe:
httpGet:
path: /status/live
port: {{ $.Values.port.serverMonitorIngest }}
port: {{ $.Values.serverMonitorIngest.ports.http }}
periodSeconds: {{ $.Values.livenessProbe.periodSeconds }}
timeoutSeconds: {{ $.Values.livenessProbe.timeoutSeconds }}
initialDelaySeconds: {{ $.Values.livenessProbe.initialDelaySeconds }}
@@ -76,7 +76,7 @@ spec:
readinessProbe:
httpGet:
path: /status/ready
port: {{ $.Values.port.serverMonitorIngest }}
port: {{ $.Values.serverMonitorIngest.ports.http }}
periodSeconds: {{ $.Values.readinessProbe.periodSeconds }}
initialDelaySeconds: {{ $.Values.readinessProbe.initialDelaySeconds }}
timeoutSeconds: {{ $.Values.readinessProbe.timeoutSeconds }}
@@ -90,11 +90,11 @@ spec:
{{- include "oneuptime.env.commonServer" . | nindent 12 }}
{{- include "oneuptime.env.oneuptimeSecret" . | nindent 12 }}
- name: PORT
value: {{ $.Values.port.serverMonitorIngest | quote }}
value: {{ $.Values.serverMonitorIngest.ports.http | quote }}
- name: DISABLE_TELEMETRY
value: {{ $.Values.serverMonitorIngest.disableTelemetryCollection | quote }}
ports:
- containerPort: {{ $.Values.port.serverMonitorIngest }}
- containerPort: {{ $.Values.serverMonitorIngest.ports.http }}
protocol: TCP
name: http
{{- if $.Values.serverMonitorIngest.resources }}
@@ -106,13 +106,13 @@ spec:
---
# OneUptime server-monitor-ingest Service
{{- $serverMonitorIngestPorts := dict "port" $.Values.port.serverMonitorIngest -}}
{{- $serverMonitorIngestPorts := dict "port" $.Values.serverMonitorIngest.ports.http -}}
{{- $serverMonitorIngestServiceArgs := dict "ServiceName" "server-monitor-ingest" "Ports" $serverMonitorIngestPorts "Release" $.Release "Values" $.Values -}}
{{- include "oneuptime.service" $serverMonitorIngestServiceArgs }}
---
# OneUptime server-monitor-ingest autoscaler
{{- if not $.Values.serverMonitorIngest.disableAutoscaler }}
{{- if and (not $.Values.serverMonitorIngest.disableAutoscaler) (not (and $.Values.keda.enabled $.Values.serverMonitorIngest.keda.enabled)) }}
{{- $serverMonitorIngestAutoScalerArgs := dict "ServiceName" "server-monitor-ingest" "Release" $.Release "Values" $.Values -}}
{{- include "oneuptime.autoscaler" $serverMonitorIngestAutoScalerArgs }}
{{- end }}

View File

@@ -1,12 +1,12 @@
# OneUptime statusPage Deployment
{{- $statusPagePorts := dict "port" $.Values.port.statusPage -}}
{{- $statusPageEnv := dict "PORT" $.Values.port.statusPage "DISABLE_TELEMETRY" $.Values.statusPage.disableTelemetryCollection -}}
{{- $statusPageDeploymentArgs :=dict "IsUI" true "ServiceName" "status-page" "Ports" $statusPagePorts "Release" $.Release "Values" $.Values "Env" $statusPageEnv "Resources" $.Values.statusPage.resources "DisableAutoscaler" $.Values.statusPage.disableAutoscaler -}}
{{- $statusPagePorts := dict "port" $.Values.statusPage.ports.http -}}
{{- $statusPageEnv := dict "PORT" $.Values.statusPage.ports.http "DISABLE_TELEMETRY" $.Values.statusPage.disableTelemetryCollection -}}
{{- $statusPageDeploymentArgs :=dict "IsUI" true "ServiceName" "status-page" "Ports" $statusPagePorts "Release" $.Release "Values" $.Values "Env" $statusPageEnv "Resources" $.Values.statusPage.resources "DisableAutoscaler" $.Values.statusPage.disableAutoscaler "ReplicaCount" $.Values.statusPage.replicaCount -}}
{{- include "oneuptime.deployment" $statusPageDeploymentArgs }}
---
# OneUptime statusPage Service
{{- $statusPagePorts := dict "port" $.Values.port.statusPage -}}
{{- $statusPagePorts := dict "port" $.Values.statusPage.ports.http -}}
{{- $statusPageServiceArgs := dict "ServiceName" "status-page" "Ports" $statusPagePorts "Release" $.Release "Values" $.Values -}}
{{- include "oneuptime.service" $statusPageServiceArgs }}
---

View File

@@ -1,15 +1,15 @@
{{- if $.Values.testServer.enabled }}
# OneUptime testServer Deployment
{{- $testServerPorts := dict "port" $.Values.port.testServer -}}
{{- $testServerEnv := dict "PORT" $.Values.port.testServer "DISABLE_TELEMETRY" $.Values.testServer.disableTelemetryCollection -}}
{{- $testServerDeploymentArgs :=dict "IsUI" true "ServiceName" "test-server" "Ports" $testServerPorts "Release" $.Release "Values" $.Values "Env" $testServerEnv "Resources" $.Values.testServer.resources "DisableAutoscaler" $.Values.testServer.disableAutoscaler -}}
# OneUptime test-server Deployment
{{- $testServerPorts := $.Values.testServer.ports -}}
{{- $testServerEnv := dict "PORT" $.Values.testServer.ports.http "DISABLE_TELEMETRY" $.Values.testServer.disableTelemetryCollection -}}
{{- $testServerDeploymentArgs :=dict "IsUI" true "ServiceName" "test-server" "Ports" $testServerPorts "Release" $.Release "Values" $.Values "Env" $testServerEnv "Resources" $.Values.testServer.resources "DisableAutoscaler" $.Values.testServer.disableAutoscaler "ReplicaCount" $.Values.testServer.replicaCount -}}
{{- include "oneuptime.deployment" $testServerDeploymentArgs }}
---
# OneUptime testServer Service
{{- $testServerPorts := dict "port" $.Values.port.testServer -}}
{{- $testServerServiceArgs := dict "ServiceName" "test-server" "Ports" $testServerPorts "Release" $.Release "Values" $.Values -}}
# OneUptime test-server Service
{{- $testServerPorts := $.Values.testServer.ports -}}
{{- $testServerServiceArgs := dict "ServiceName" "test-server" "Ports" $testServerPorts "Release" $.Release "Values" $.Values -}}
{{- include "oneuptime.service" $testServerServiceArgs }}
---

View File

@@ -14,9 +14,13 @@ spec:
selector:
matchLabels:
app: {{ printf "%s-%s" $.Release.Name "worker" }}
{{- if $.Values.worker.replicaCount }}
replicas: {{ $.Values.worker.replicaCount }}
{{- else }}
{{- if or (not $.Values.autoscaling.enabled) ($.Values.worker.disableAutoscaler) }}
replicas: {{ $.Values.deployment.replicaCount }}
{{- end }}
{{- end }}
template:
metadata:
labels:
@@ -48,7 +52,7 @@ spec:
startupProbe:
httpGet:
path: /status/live
port: {{ $.Values.port.worker }}
port: {{ $.Values.worker.ports.http }}
periodSeconds: {{ $.Values.startupProbe.periodSeconds }}
failureThreshold: {{ $.Values.startupProbe.failureThreshold }}
{{- end }}
@@ -57,7 +61,7 @@ spec:
livenessProbe:
httpGet:
path: /status/live
port: {{ $.Values.port.worker }}
port: {{ $.Values.worker.ports.http }}
periodSeconds: {{ $.Values.livenessProbe.periodSeconds }}
timeoutSeconds: {{ $.Values.livenessProbe.timeoutSeconds }}
initialDelaySeconds: {{ $.Values.livenessProbe.initialDelaySeconds }}
@@ -67,7 +71,7 @@ spec:
readinessProbe:
httpGet:
path: /status/ready
port: {{ $.Values.port.worker }}
port: {{ $.Values.worker.ports.http }}
periodSeconds: {{ $.Values.readinessProbe.periodSeconds }}
initialDelaySeconds: {{ $.Values.readinessProbe.initialDelaySeconds }}
timeoutSeconds: {{ $.Values.readinessProbe.timeoutSeconds }}
@@ -81,11 +85,11 @@ spec:
{{- include "oneuptime.env.commonServer" . | nindent 12 }}
{{- include "oneuptime.env.oneuptimeSecret" . | nindent 12 }}
- name: PORT
value: {{ $.Values.port.worker | quote }}
value: {{ $.Values.worker.ports.http | quote }}
- name: DISABLE_TELEMETRY
value: {{ $.Values.worker.disableTelemetryCollection | quote }}
ports:
- containerPort: {{ $.Values.port.worker }}
- containerPort: {{ $.Values.worker.ports.http }}
protocol: TCP
name: http
{{- if $.Values.worker.resources }}
@@ -97,7 +101,7 @@ spec:
---
# OneUptime app Service
{{- $workerPorts := dict "port" $.Values.port.worker -}}
{{- $workerPorts := $.Values.worker.ports -}}
{{- $workerServiceArgs := dict "ServiceName" "worker" "Ports" $workerPorts "Release" $.Release "Values" $.Values -}}
{{- include "oneuptime.service" $workerServiceArgs }}
---

View File

@@ -14,9 +14,13 @@ spec:
selector:
matchLabels:
app: {{ printf "%s-%s" $.Release.Name "workflow" }}
{{- if $.Values.workflow.replicaCount }}
replicas: {{ $.Values.workflow.replicaCount }}
{{- else }}
{{- if or (not $.Values.autoscaling.enabled) ($.Values.workflow.disableAutoscaler) }}
replicas: {{ $.Values.deployment.replicaCount }}
{{- end }}
{{- end }}
template:
metadata:
labels:
@@ -48,7 +52,7 @@ spec:
startupProbe:
httpGet:
path: /status/live
port: {{ $.Values.port.workflow }}
port: {{ $.Values.workflow.ports.http }}
periodSeconds: {{ $.Values.startupProbe.periodSeconds }}
failureThreshold: {{ $.Values.startupProbe.failureThreshold }}
{{- end }}
@@ -57,7 +61,7 @@ spec:
livenessProbe:
httpGet:
path: /status/live
port: {{ $.Values.port.workflow }}
port: {{ $.Values.workflow.ports.http }}
periodSeconds: {{ $.Values.livenessProbe.periodSeconds }}
timeoutSeconds: {{ $.Values.livenessProbe.timeoutSeconds }}
initialDelaySeconds: {{ $.Values.livenessProbe.initialDelaySeconds }}
@@ -67,7 +71,7 @@ spec:
readinessProbe:
httpGet:
path: /status/ready
port: {{ $.Values.port.workflow }}
port: {{ $.Values.workflow.ports.http }}
periodSeconds: {{ $.Values.readinessProbe.periodSeconds }}
initialDelaySeconds: {{ $.Values.readinessProbe.initialDelaySeconds }}
timeoutSeconds: {{ $.Values.readinessProbe.timeoutSeconds }}
@@ -81,11 +85,11 @@ spec:
{{- include "oneuptime.env.commonServer" . | nindent 12 }}
{{- include "oneuptime.env.oneuptimeSecret" . | nindent 12 }}
- name: PORT
value: {{ $.Values.port.workflow | quote }}
value: {{ $.Values.workflow.ports.http | quote }}
- name: DISABLE_TELEMETRY
value: {{ $.Values.workflow.disableTelemetryCollection | quote }}
ports:
- containerPort: {{ $.Values.port.workflow }}
- containerPort: {{ $.Values.workflow.ports.http }}
protocol: TCP
name: http
{{- if $.Values.workflow.resources }}
@@ -98,7 +102,7 @@ spec:
---
# OneUptime app Service
{{- $workflowPorts := dict "port" $.Values.port.workflow -}}
{{- $workflowPorts := $.Values.workflow.ports -}}
{{- $workflowServiceArgs := dict "ServiceName" "workflow" "Ports" $workflowPorts "Release" $.Release "Values" $.Values -}}
{{- include "oneuptime.service" $workflowServiceArgs }}
---

View File

@@ -28,19 +28,7 @@ fluentdHost:
deployment:
# Default replica count for all deployments
replicaCount: 1
probeIngest:
replicaCount:
serverMonitorIngest:
replicaCount:
openTelemetryIngest:
replicaCount:
fluentIngest:
replicaCount:
incomingRequestIngest:
replicaCount:
otelCollector:
replicaCount:
replicaCount: 1
metalLb:
enabled: false
@@ -50,10 +38,14 @@ metalLb:
# - 51.158.55.153/32 # List of IP addresses of all the servers in the cluster.
nginx:
replicaCount: 1
disableTelemetryCollection: false
disableAutoscaler: false
listenAddress: ""
listenOptions: ""
ports:
http: 80
https: 443
service:
loadBalancerIP:
type: LoadBalancer
@@ -191,9 +183,12 @@ alerts:
# 2. Set the statusPage.cnameRecord to "oneuptime.yourcompany.com"
# 3. Create CNAME record in your DNS provider with the name "status.yourcompany.com" and value "oneuptime.yourcompany.com"
statusPage:
replicaCount: 1
cnameRecord:
disableTelemetryCollection: false
disableAutoscaler: false
ports:
http: 3105
probes:
one:
@@ -208,6 +203,19 @@ probes:
customCodeMonitorScriptTimeoutInMs: 60000
disableTelemetryCollection: false
disableAutoscaler: false
ports:
http: 3874
# KEDA autoscaling configuration based on monitor queue metrics
keda:
enabled: false
minReplicas: 1
maxReplicas: 100
# Scale up when queue size exceeds this threshold per probe
queueSizeThreshold: 10
# Polling interval for metrics (in seconds)
pollingInterval: 30
# Cooldown period after scaling (in seconds)
cooldownPeriod: 300
# resources:
# additionalContainers:
# two:
@@ -217,43 +225,34 @@ probes:
# monitorFetchLimit: 10
# key:
# replicaCount: 1
# ports:
# http: 3874
# syntheticMonitorScriptTimeoutInMs: 60000
# customCodeMonitorScriptTimeoutInMs: 60000
# disableTelemetryCollection: false
# disableAutoscaler: false
# resources:
# additionalContainers:
# KEDA autoscaling configuration based on monitor queue metrics
# keda:
# enabled: false
# minReplicas: 1
# maxReplicas: 100
# # Scale up when queue size exceeds this threshold per probe
# queueSizeThreshold: 10
# # Polling interval for metrics (in seconds)
# pollingInterval: 30
# # Cooldown period after scaling (in seconds)
# cooldownPeriod: 300
port:
app: 3002
probeIngest: 3400
serverMonitorIngest: 3404
openTelemetryIngest: 3403
fluentIngest: 3401
incomingRequestIngest: 3402
testServer: 3800
accounts: 3003
statusPage: 3105
dashboard: 3009
adminDashboard: 3158
# This is where oneuptime server is hosted on.
nginxHttp: 80
# If you are connecting Status Pages to custom domains, then this will be the port where the status page will be hosted on.
statusPageHttpsPort: 443
otelCollectorGrpc: 4317
otelCollectorHttp: 4318
isolatedVM: 4572
home: 1444
worker: 1445
workflow: 3099
apiReference: 1446
docs: 1447
testServer:
replicaCount: 1
enabled: false
disableTelemetryCollection: false
disableAutoscaler: false
ports:
http: 3800
openTelemetryExporter:
@@ -431,88 +430,192 @@ readinessProbe: # Readiness probe configuration
# OpenTelemetry Collector Configuration
openTelemetryCollector:
replicaCount: 1
disableTelemetryCollection: false
disableAutoscaler: false
ports:
grpc: 4317
http: 4318
sendingQueue:
enabled: true
size: 1000
numConsumers: 3
accounts:
replicaCount: 1
disableTelemetryCollection: false
disableAutoscaler: false
ports:
http: 3003
resources:
home:
replicaCount: 1
disableTelemetryCollection: false
disableAutoscaler: false
ports:
http: 1444
resources:
dashboard:
replicaCount: 1
disableTelemetryCollection: false
disableAutoscaler: false
ports:
http: 3009
resources:
adminDashboard:
replicaCount: 1
disableTelemetryCollection: false
disableAutoscaler: false
ports:
http: 3158
resources:
worker:
replicaCount: 1
disableTelemetryCollection: false
disableAutoscaler: false
ports:
http: 1445
resources:
workflow:
replicaCount: 1
disableTelemetryCollection: false
disableAutoscaler: false
workflowTimeoutInMs: 5000
ports:
http: 3099
resources:
apiReference:
replicaCount: 1
disableTelemetryCollection: false
disableAutoscaler: false
ports:
http: 1446
resources:
docs:
replicaCount: 1
disableTelemetryCollection: false
disableAutoscaler: false
ports:
http: 1447
resources:
app:
replicaCount: 1
disableTelemetryCollection: false
disableAutoscaler: false
ports:
http: 3002
resources:
probeIngest:
replicaCount: 1
disableTelemetryCollection: false
disableAutoscaler: false
ports:
http: 3400
resources:
# KEDA autoscaling configuration based on queue metrics
keda:
enabled: false
minReplicas: 1
maxReplicas: 100
# Scale up when queue size exceeds this threshold
queueSizeThreshold: 100
# Polling interval for metrics (in seconds)
pollingInterval: 30
# Cooldown period after scaling (in seconds)
cooldownPeriod: 300
openTelemetryIngest:
replicaCount: 1
disableTelemetryCollection: false
disableAutoscaler: false
ports:
http: 3403
resources:
# KEDA autoscaling configuration based on queue metrics
keda:
enabled: false
minReplicas: 1
maxReplicas: 100
# Scale up when queue size exceeds this threshold
queueSizeThreshold: 100
# Polling interval for metrics (in seconds)
pollingInterval: 30
# Cooldown period after scaling (in seconds)
cooldownPeriod: 300
fluentIngest:
replicaCount: 1
disableTelemetryCollection: false
disableAutoscaler: false
ports:
http: 3401
resources:
# KEDA autoscaling configuration based on queue metrics
keda:
enabled: false
minReplicas: 1
maxReplicas: 100
# Scale up when queue size exceeds this threshold
queueSizeThreshold: 100
# Polling interval for metrics (in seconds)
pollingInterval: 30
# Cooldown period after scaling (in seconds)
cooldownPeriod: 300
incomingRequestIngest:
replicaCount: 1
disableTelemetryCollection: false
disableAutoscaler: false
ports:
http: 3402
resources:
# KEDA autoscaling configuration based on queue metrics
keda:
enabled: false
minReplicas: 1
maxReplicas: 100
# Scale up when queue size exceeds this threshold
queueSizeThreshold: 100
# Polling interval for metrics (in seconds)
pollingInterval: 30
# Cooldown period after scaling (in seconds)
cooldownPeriod: 300
isolatedVM:
replicaCount: 1
disableTelemetryCollection: false
disableAutoscaler: false
ports:
http: 4572
resources:
serverMonitorIngest:
replicaCount: 1
disableTelemetryCollection: false
disableAutoscaler: false
ports:
http: 3404
resources:
# KEDA autoscaling configuration based on queue metrics
keda:
enabled: false
minReplicas: 1
maxReplicas: 100
# Scale up when queue size exceeds this threshold
queueSizeThreshold: 100
# Polling interval for metrics (in seconds)
pollingInterval: 30
# Cooldown period after scaling (in seconds)
cooldownPeriod: 300
slackApp:

View File

@@ -1,12 +1,6 @@
import HTTPMethod from "Common/Types/API/HTTPMethod";
import OneUptimeDate from "Common/Types/Date";
import Dictionary from "Common/Types/Dictionary";
import BadDataException from "Common/Types/Exception/BadDataException";
import { JSONObject } from "Common/Types/JSON";
import IncomingMonitorRequest from "Common/Types/Monitor/IncomingMonitor/IncomingMonitorRequest";
import MonitorType from "Common/Types/Monitor/MonitorType";
import ObjectID from "Common/Types/ObjectID";
import MonitorService from "Common/Server/Services/MonitorService";
import Express, {
ExpressRequest,
ExpressResponse,
@@ -14,10 +8,9 @@ import Express, {
NextFunction,
RequestHandler,
} from "Common/Server/Utils/Express";
import MonitorResourceUtil from "Common/Server/Utils/Monitor/MonitorResource";
import Response from "Common/Server/Utils/Response";
import Monitor from "Common/Models/DatabaseModels/Monitor";
import logger from "Common/Server/Utils/Logger";
import IncomingRequestIngestQueueService from "../Services/Queue/IncomingRequestIngestQueueService";
import ClusterKeyAuthorization from "Common/Server/Middleware/ClusterKeyAuthorization";
const router: ExpressRouter = Express.getRouter();
@@ -38,63 +31,18 @@ const processIncomingRequest: RequestHandler = async (
throw new BadDataException("Invalid Secret Key");
}
const isGetRequest: boolean = req.method === "GET";
const isPostRequest: boolean = req.method === "POST";
// Return response immediately
Response.sendEmptySuccessResponse(req, res);
let httpMethod: HTTPMethod = HTTPMethod.GET;
if (isGetRequest) {
httpMethod = HTTPMethod.GET;
}
if (isPostRequest) {
httpMethod = HTTPMethod.POST;
}
const monitor: Monitor | null = await MonitorService.findOneBy({
query: {
incomingRequestSecretKey: new ObjectID(monitorSecretKeyAsString),
monitorType: MonitorType.IncomingRequest,
},
select: {
_id: true,
projectId: true,
},
props: {
isRoot: true,
},
});
if (!monitor || !monitor._id) {
throw new BadDataException("Monitor not found");
}
if (!monitor.projectId) {
throw new BadDataException("Project not found");
}
const now: Date = OneUptimeDate.getCurrentDate();
const incomingRequest: IncomingMonitorRequest = {
projectId: monitor.projectId,
monitorId: new ObjectID(monitor._id.toString()),
// Add to queue for asynchronous processing
await IncomingRequestIngestQueueService.addIncomingRequestIngestJob({
secretKey: monitorSecretKeyAsString,
requestHeaders: requestHeaders,
requestBody: requestBody,
incomingRequestReceivedAt: now,
onlyCheckForIncomingRequestReceivedAt: false,
requestMethod: httpMethod,
checkedAt: now,
};
// process probe response here.
MonitorResourceUtil.monitorResource(incomingRequest).catch((err: Error) => {
// do nothing.
// we don't want to throw error here.
// we just want to log the error.
logger.error(err);
requestMethod: req.method,
});
return Response.sendEmptySuccessResponse(req, res);
return;
} catch (err) {
return next(err);
}
@@ -122,4 +70,90 @@ router.get(
},
);
// Queue stats endpoint
router.get(
"/incoming-request/queue/stats",
ClusterKeyAuthorization.isAuthorizedServiceMiddleware,
async (
req: ExpressRequest,
res: ExpressResponse,
next: NextFunction,
): Promise<void> => {
try {
const stats: {
waiting: number;
active: number;
completed: number;
failed: number;
delayed: number;
total: number;
} = await IncomingRequestIngestQueueService.getQueueStats();
return Response.sendJsonObjectResponse(req, res, stats);
} catch (err) {
return next(err);
}
},
);
// Queue size endpoint
router.get(
"/incoming-request/queue/size",
ClusterKeyAuthorization.isAuthorizedServiceMiddleware,
async (
req: ExpressRequest,
res: ExpressResponse,
next: NextFunction,
): Promise<void> => {
try {
const size: number =
await IncomingRequestIngestQueueService.getQueueSize();
return Response.sendJsonObjectResponse(req, res, { size });
} catch (err) {
return next(err);
}
},
);
// Queue failed jobs endpoint
router.get(
"/incoming-request/queue/failed",
ClusterKeyAuthorization.isAuthorizedServiceMiddleware,
async (
req: ExpressRequest,
res: ExpressResponse,
next: NextFunction,
): Promise<void> => {
try {
// Parse pagination parameters from query string
const start: number = parseInt(req.query["start"] as string) || 0;
const end: number = parseInt(req.query["end"] as string) || 100;
const failedJobs: Array<{
id: string;
name: string;
data: any;
failedReason: string;
stackTrace?: string;
processedOn: Date | null;
finishedOn: Date | null;
attemptsMade: number;
}> = await IncomingRequestIngestQueueService.getFailedJobs({
start,
end,
});
return Response.sendJsonObjectResponse(req, res, {
failedJobs,
pagination: {
start,
end,
count: failedJobs.length,
},
});
} catch (err) {
return next(err);
}
},
);
export default router;

View File

@@ -0,0 +1,38 @@
import Express, {
  ExpressRequest,
  ExpressResponse,
  ExpressRouter,
  NextFunction,
} from "Common/Server/Utils/Express";
import IncomingRequestIngestQueueService from "../Services/Queue/IncomingRequestIngestQueueService";
// import ClusterKeyAuthorization from "Common/Server/Middleware/ClusterKeyAuthorization";

const router: ExpressRouter = Express.getRouter();

/**
 * JSON metrics endpoint for KEDA autoscaling.
 * Responds with the current incoming-request ingest queue size so the
 * KEDA metrics-api scaler can scale this service on backlog depth.
 */
const handleQueueSize: (
  _req: ExpressRequest,
  res: ExpressResponse,
  next: NextFunction,
) => Promise<void> = async (
  _req: ExpressRequest,
  res: ExpressResponse,
  next: NextFunction,
): Promise<void> => {
  try {
    // Depth of the incoming-request ingest queue.
    const size: number = await IncomingRequestIngestQueueService.getQueueSize();
    res.setHeader("Content-Type", "application/json");
    res.status(200).json({ queueSize: size });
  } catch (err) {
    return next(err);
  }
};

router.get(
  "/metrics/queue-size",
  // ClusterKeyAuthorization.isAuthorizedServiceMiddleware, // Temporarily disabled for KEDA debugging
  handleQueueSize,
);

export default router;

View File

@@ -1,4 +1,5 @@
import IncomingRequestAPI from "./API/IncomingRequest";
import MetricsAPI from "./API/Metrics";
import { PromiseVoidFunction } from "Common/Types/FunctionTypes";
import { ClickhouseAppInstance } from "Common/Server/Infrastructure/ClickhouseDatabase";
import PostgresAppInstance from "Common/Server/Infrastructure/PostgresDatabase";
@@ -9,6 +10,7 @@ import logger from "Common/Server/Utils/Logger";
import Realtime from "Common/Server/Utils/Realtime";
import App from "Common/Server/Utils/StartServer";
import Telemetry from "Common/Server/Utils/Telemetry";
import "./Jobs/IncomingRequestIngest/ProcessIncomingRequestIngest";
import "ejs";
const app: ExpressApplication = Express.getExpressApp();
@@ -16,6 +18,7 @@ const app: ExpressApplication = Express.getExpressApp();
const APP_NAME: string = "incoming-request-ingest";
app.use([`/${APP_NAME}`, "/"], IncomingRequestAPI);
app.use([`/${APP_NAME}`, "/"], MetricsAPI);
const init: PromiseVoidFunction = async (): Promise<void> => {
try {

View File

@@ -0,0 +1,104 @@
import { IncomingRequestIngestJobData } from "../../Services/Queue/IncomingRequestIngestQueueService";
import logger from "Common/Server/Utils/Logger";
import { QueueJob, QueueName } from "Common/Server/Infrastructure/Queue";
import QueueWorker from "Common/Server/Infrastructure/QueueWorker";
import HTTPMethod from "Common/Types/API/HTTPMethod";
import OneUptimeDate from "Common/Types/Date";
import Dictionary from "Common/Types/Dictionary";
import BadDataException from "Common/Types/Exception/BadDataException";
import { JSONObject } from "Common/Types/JSON";
import IncomingMonitorRequest from "Common/Types/Monitor/IncomingMonitor/IncomingMonitorRequest";
import MonitorType from "Common/Types/Monitor/MonitorType";
import ObjectID from "Common/Types/ObjectID";
import MonitorService from "Common/Server/Services/MonitorService";
import MonitorResourceUtil from "Common/Server/Utils/Monitor/MonitorResource";
import Monitor from "Common/Models/DatabaseModels/Monitor";
// Set up the worker for processing incoming request ingest queue
// Each job payload is the IncomingRequestIngestJobData enqueued by
// IncomingRequestIngestQueueService.addIncomingRequestIngestJob.
QueueWorker.getWorker(
  QueueName.IncomingRequestIngest,
  async (job: QueueJob): Promise<void> => {
    logger.debug(`Processing incoming request ingestion job: ${job.name}`);
    try {
      // job.data round-trips through JSON; cast back to the typed payload.
      const jobData: IncomingRequestIngestJobData =
        job.data as IncomingRequestIngestJobData;
      await processIncomingRequestFromQueue(jobData);
      logger.debug(
        `Successfully processed incoming request ingestion job: ${job.name}`,
      );
    } catch (error) {
      logger.error(`Error processing incoming request ingestion job:`);
      logger.error(error);
      // Rethrow so the queue infrastructure records the failure.
      throw error;
    }
  },
  { concurrency: 20 }, // Process up to 20 incoming request ingest jobs concurrently
);
async function processIncomingRequestFromQueue(
jobData: IncomingRequestIngestJobData,
): Promise<void> {
const requestHeaders: Dictionary<string> = jobData.requestHeaders;
const requestBody: string | JSONObject = jobData.requestBody;
const monitorSecretKeyAsString: string = jobData.secretKey;
if (!monitorSecretKeyAsString) {
throw new BadDataException("Invalid Secret Key");
}
const isGetRequest: boolean = jobData.requestMethod === "GET";
const isPostRequest: boolean = jobData.requestMethod === "POST";
let httpMethod: HTTPMethod = HTTPMethod.GET;
if (isGetRequest) {
httpMethod = HTTPMethod.GET;
}
if (isPostRequest) {
httpMethod = HTTPMethod.POST;
}
const monitor: Monitor | null = await MonitorService.findOneBy({
query: {
incomingRequestSecretKey: new ObjectID(monitorSecretKeyAsString),
monitorType: MonitorType.IncomingRequest,
},
select: {
_id: true,
projectId: true,
},
props: {
isRoot: true,
},
});
if (!monitor || !monitor._id) {
throw new BadDataException("Monitor not found");
}
if (!monitor.projectId) {
throw new BadDataException("Project not found");
}
const now: Date = OneUptimeDate.getCurrentDate();
const incomingRequest: IncomingMonitorRequest = {
projectId: monitor.projectId,
monitorId: new ObjectID(monitor._id.toString()),
requestHeaders: requestHeaders,
requestBody: requestBody,
incomingRequestReceivedAt: now,
onlyCheckForIncomingRequestReceivedAt: false,
requestMethod: httpMethod,
checkedAt: now,
};
// process probe response here.
await MonitorResourceUtil.monitorResource(incomingRequest);
}
logger.debug("Incoming request ingest worker initialized");

View File

@@ -0,0 +1,80 @@
import Queue, { QueueName } from "Common/Server/Infrastructure/Queue";
import { JSONObject } from "Common/Types/JSON";
import OneUptimeDate from "Common/Types/Date";
import logger from "Common/Server/Utils/Logger";
import Dictionary from "Common/Types/Dictionary";
/** Payload stored on each incoming-request ingest queue job. */
export interface IncomingRequestIngestJobData {
  secretKey: string;
  requestHeaders: Dictionary<string>;
  requestBody: string | JSONObject;
  requestMethod: string;
  ingestionTimestamp: Date;
}

/**
 * Thin wrapper around the shared Queue infrastructure for the
 * incoming-request ingest queue: enqueues jobs and exposes queue
 * introspection (size, stats, failed jobs).
 */
export default class IncomingRequestIngestQueueService {
  /**
   * Enqueues one incoming request for asynchronous processing.
   * The ingestion timestamp is stamped here, at enqueue time.
   */
  public static async addIncomingRequestIngestJob(data: {
    secretKey: string;
    requestHeaders: Dictionary<string>;
    requestBody: string | JSONObject;
    requestMethod: string;
  }): Promise<void> {
    try {
      const payload: IncomingRequestIngestJobData = {
        secretKey: data.secretKey,
        requestHeaders: data.requestHeaders,
        requestBody: data.requestBody,
        requestMethod: data.requestMethod,
        ingestionTimestamp: OneUptimeDate.getCurrentDate(),
      };

      // Job id: secret key plus a nanosecond timestamp for uniqueness.
      const jobId: string = `incoming-request-${data.secretKey}-${OneUptimeDate.getCurrentDateAsUnixNano()}`;

      await Queue.addJob(
        QueueName.IncomingRequestIngest,
        jobId,
        "ProcessIncomingRequestIngest",
        payload as unknown as JSONObject,
      );

      logger.debug(`Added incoming request ingestion job: ${jobId}`);
    } catch (error) {
      logger.error(`Error adding incoming request ingestion job:`);
      logger.error(error);
      throw error;
    }
  }

  /** Total number of jobs currently in the queue. */
  public static async getQueueSize(): Promise<number> {
    return Queue.getQueueSize(QueueName.IncomingRequestIngest);
  }

  /** Per-state job counts for the queue. */
  public static async getQueueStats(): Promise<{
    waiting: number;
    active: number;
    completed: number;
    failed: number;
    delayed: number;
    total: number;
  }> {
    return Queue.getQueueStats(QueueName.IncomingRequestIngest);
  }

  /** Failed jobs in the queue, optionally windowed by start/end offsets. */
  public static getFailedJobs(options?: {
    start?: number;
    end?: number;
  }): Promise<
    Array<{
      id: string;
      name: string;
      data: JSONObject;
      failedReason: string;
      stackTrace?: string;
      processedOn: Date | null;
      finishedOn: Date | null;
      attemptsMade: number;
    }>
  > {
    return Queue.getFailedJobs(QueueName.IncomingRequestIngest, options);
  }
}

View File

@@ -5,62 +5,29 @@ import Express, {
NextFunction,
} from "Common/Server/Utils/Express";
import TelemetryQueueService from "../Services/Queue/TelemetryQueueService";
import ClusterKeyAuthorization from "Common/Server/Middleware/ClusterKeyAuthorization";
// import ClusterKeyAuthorization from "Common/Server/Middleware/ClusterKeyAuthorization";
const router: ExpressRouter = Express.getRouter();
/**
* Prometheus metrics endpoint for KEDA autoscaling
* Exposes queue metrics in Prometheus format
* JSON metrics endpoint for KEDA autoscaling
* Returns queue size as JSON for KEDA metrics-api scaler
*/
router.get(
"/metrics",
ClusterKeyAuthorization.isAuthorizedServiceMiddleware,
"/metrics/queue-size",
// ClusterKeyAuthorization.isAuthorizedServiceMiddleware, // Temporarily disabled for KEDA debugging
async (
_req: ExpressRequest,
res: ExpressResponse,
next: NextFunction,
): Promise<void> => {
try {
const [queueSize, queueStats] = await Promise.all([
TelemetryQueueService.getQueueSize(),
TelemetryQueueService.getQueueStats(),
]);
const queueSize: number = await TelemetryQueueService.getQueueSize();
// Generate Prometheus metrics format
const metrics: string = [
"# HELP oneuptime_telemetry_queue_size Current size of the telemetry queue",
"# TYPE oneuptime_telemetry_queue_size gauge",
`oneuptime_telemetry_queue_size ${queueSize}`,
"",
"# HELP oneuptime_telemetry_queue_waiting Number of waiting jobs in the telemetry queue",
"# TYPE oneuptime_telemetry_queue_waiting gauge",
`oneuptime_telemetry_queue_waiting ${queueStats.waiting}`,
"",
"# HELP oneuptime_telemetry_queue_active Number of active jobs in the telemetry queue",
"# TYPE oneuptime_telemetry_queue_active gauge",
`oneuptime_telemetry_queue_active ${queueStats.active}`,
"",
"# HELP oneuptime_telemetry_queue_completed Number of completed jobs in the telemetry queue",
"# TYPE oneuptime_telemetry_queue_completed counter",
`oneuptime_telemetry_queue_completed ${queueStats.completed}`,
"",
"# HELP oneuptime_telemetry_queue_failed Number of failed jobs in the telemetry queue",
"# TYPE oneuptime_telemetry_queue_failed counter",
`oneuptime_telemetry_queue_failed ${queueStats.failed}`,
"",
"# HELP oneuptime_telemetry_queue_delayed Number of delayed jobs in the telemetry queue",
"# TYPE oneuptime_telemetry_queue_delayed gauge",
`oneuptime_telemetry_queue_delayed ${queueStats.delayed}`,
"",
"# HELP oneuptime_telemetry_queue_total Total number of jobs in the telemetry queue",
"# TYPE oneuptime_telemetry_queue_total gauge",
`oneuptime_telemetry_queue_total ${queueStats.total}`,
"",
].join("\n");
res.setHeader("Content-Type", "text/plain; version=0.0.4; charset=utf-8");
res.status(200).send(metrics);
res.setHeader("Content-Type", "application/json");
res.status(200).json({
queueSize: queueSize,
});
} catch (err) {
return next(err);
}

View File

@@ -120,6 +120,7 @@ router.get(
name: string;
data: any;
failedReason: string;
stackTrace?: string;
processedOn: Date | null;
finishedOn: Date | null;
attemptsMade: number;

View File

@@ -98,6 +98,7 @@ export default class TelemetryQueueService {
name: string;
data: JSONObject;
failedReason: string;
stackTrace?: string;
processedOn: Date | null;
finishedOn: Date | null;
attemptsMade: number;

84
Probe/API/Metrics.ts Normal file
View File

@@ -0,0 +1,84 @@
import Express, {
  ExpressRequest,
  ExpressResponse,
  ExpressRouter,
  NextFunction,
} from "Common/Server/Utils/Express";
import Response from "Common/Server/Utils/Response";
import { PROBE_INGEST_URL } from "../Config";
import HTTPErrorResponse from "Common/Types/API/HTTPErrorResponse";
import HTTPMethod from "Common/Types/API/HTTPMethod";
import HTTPResponse from "Common/Types/API/HTTPResponse";
import URL from "Common/Types/API/URL";
import { JSONObject } from "Common/Types/JSON";
import API from "Common/Utils/API";
import logger from "Common/Server/Utils/Logger";
import ProbeAPIRequest from "../Utils/ProbeAPIRequest";

const router: ExpressRouter = Express.getRouter();

// Metrics endpoint for Keda autoscaling.
// Proxies the pending-monitor count for this probe from the ProbeIngest API
// and returns it as { queueSize: number }.
router.get(
  "/queue-size",
  async (
    req: ExpressRequest,
    res: ExpressResponse,
    next: NextFunction,
  ): Promise<void> => {
    try {
      // Get the pending monitor count for this specific probe from ProbeIngest API
      const queueSizeUrl: URL = URL.fromString(
        PROBE_INGEST_URL.toString(),
      ).addRoute("/metrics/queue-size");

      logger.debug("Fetching queue size from ProbeIngest API");

      // Use probe authentication (probe key and probe ID)
      const requestBody: JSONObject = ProbeAPIRequest.getDefaultRequestBody();

      const result: HTTPResponse<JSONObject> | HTTPErrorResponse =
        await API.fetch<JSONObject>(
          HTTPMethod.POST,
          queueSizeUrl,
          requestBody,
          {},
        );

      if (result instanceof HTTPErrorResponse) {
        logger.error("Error fetching queue size from ProbeIngest API");
        logger.error(result);
        throw result;
      }

      logger.debug("Queue size fetched successfully from ProbeIngest API");
      logger.debug(result.data);

      // The upstream service should return a number, but do not trust the
      // wire format: narrow from unknown instead of asserting a type.
      // (The previous `as number` assertion made the string-handling branch
      // below only reachable by lying to the type checker.)
      const rawQueueSize: unknown = result.data["queueSize"];
      let queueSize: number = 0;

      if (typeof rawQueueSize === "number") {
        queueSize = rawQueueSize;
      } else if (typeof rawQueueSize === "string") {
        // if string then convert to number
        const parsedQueueSize: number = parseInt(rawQueueSize, 10);
        if (!isNaN(parsedQueueSize)) {
          queueSize = parsedQueueSize;
        } else {
          logger.warn("Queue size is not a valid number, defaulting to 0");
        }
      } else if (rawQueueSize !== undefined && rawQueueSize !== null) {
        // Any other shape (object, boolean, …) cannot be a queue size.
        logger.warn("Queue size is not a valid number, defaulting to 0");
      }

      logger.debug(`Queue size fetched: ${queueSize}`);

      return Response.sendJsonObjectResponse(req, res, {
        queueSize: queueSize,
      });
    } catch (err) {
      logger.error("Error in metrics queue-size endpoint");
      logger.error(err);
      return next(err);
    }
  },
);

export default router;

View File

@@ -3,10 +3,12 @@ import AliveJob from "./Jobs/Alive";
import FetchMonitorList from "./Jobs/Monitor/FetchList";
import FetchMonitorTestList from "./Jobs/Monitor/FetchMonitorTest";
import Register from "./Services/Register";
import MetricsAPI from "./API/Metrics";
import { PromiseVoidFunction } from "Common/Types/FunctionTypes";
import logger from "Common/Server/Utils/Logger";
import App from "Common/Server/Utils/StartServer";
import Telemetry from "Common/Server/Utils/Telemetry";
import Express, { ExpressApplication } from "Common/Server/Utils/Express";
import "ejs";
const APP_NAME: string = "probe";
@@ -29,6 +31,10 @@ const init: PromiseVoidFunction = async (): Promise<void> => {
},
});
// Add metrics API routes
const app: ExpressApplication = Express.getExpressApp();
app.use("/metrics", MetricsAPI);
// add default routes
await App.addDefaultRoutes();

View File

@@ -0,0 +1,37 @@
import Express, {
  ExpressRequest,
  ExpressResponse,
  ExpressRouter,
  NextFunction,
} from "Common/Server/Utils/Express";
import ProbeIngestQueueService from "../Services/Queue/ProbeIngestQueueService";
// import ClusterKeyAuthorization from "Common/Server/Middleware/ClusterKeyAuthorization";

const router: ExpressRouter = Express.getRouter();

/**
 * JSON metrics endpoint for KEDA autoscaling.
 * Reports the probe-ingest queue depth for the KEDA metrics-api scaler.
 */
router.get(
  "/metrics/queue-size",
  // ClusterKeyAuthorization.isAuthorizedServiceMiddleware, // Temporarily disabled for KEDA debugging
  async (
    _req: ExpressRequest,
    res: ExpressResponse,
    next: NextFunction,
  ): Promise<void> => {
    try {
      const currentSize: number = await ProbeIngestQueueService.getQueueSize();
      res.setHeader("Content-Type", "application/json");
      res.status(200).json({ queueSize: currentSize });
    } catch (err) {
      return next(err);
    }
  },
);

export default router;

View File

@@ -18,13 +18,18 @@ import Express, {
NextFunction,
} from "Common/Server/Utils/Express";
import logger from "Common/Server/Utils/Logger";
import MonitorResourceUtil from "Common/Server/Utils/Monitor/MonitorResource";
import Response from "Common/Server/Utils/Response";
import GlobalConfig from "Common/Models/DatabaseModels/GlobalConfig";
import Probe from "Common/Models/DatabaseModels/Probe";
import User from "Common/Models/DatabaseModels/User";
import MonitorTestService from "Common/Server/Services/MonitorTestService";
import ProbeIngestQueueService from "../Services/Queue/ProbeIngestQueueService";
import ClusterKeyAuthorization from "Common/Server/Middleware/ClusterKeyAuthorization";
import PositiveNumber from "Common/Types/PositiveNumber";
import MonitorProbeService from "Common/Server/Services/MonitorProbeService";
import QueryHelper from "Common/Server/Types/Database/QueryHelper";
import OneUptimeDate from "Common/Types/Date";
import MonitorService from "Common/Server/Services/MonitorService";
import { IsBillingEnabled } from "Common/Server/EnvironmentConfig";
const router: ExpressRouter = Express.getRouter();
@@ -200,28 +205,35 @@ router.post(
}
// now send an email to all the emailsToNotify
for (const email of emailsToNotify) {
MailService.sendMail(
{
toEmail: email,
templateType: EmailTemplateType.ProbeOffline,
subject: "ACTION REQUIRED: Probe Offline Notification",
vars: {
probeName: probe.name || "",
probeDescription: probe.description || "",
projectId: probe.projectId?.toString() || "",
probeId: probe.id?.toString() || "",
hostname: statusReport["hostname"]?.toString() || "",
emailReason: emailReason,
issue: issue,
// Skip sending email if billing is enabled
if (!IsBillingEnabled) {
for (const email of emailsToNotify) {
MailService.sendMail(
{
toEmail: email,
templateType: EmailTemplateType.ProbeOffline,
subject: "ACTION REQUIRED: Probe Offline Notification",
vars: {
probeName: probe.name || "",
probeDescription: probe.description || "",
projectId: probe.projectId?.toString() || "",
probeId: probe.id?.toString() || "",
hostname: statusReport["hostname"]?.toString() || "",
emailReason: emailReason,
issue: issue,
},
},
},
{
projectId: probe.projectId,
},
).catch((err: Error) => {
logger.error(err);
});
{
projectId: probe.projectId,
},
).catch((err: Error) => {
logger.error(err);
});
}
} else {
logger.debug(
"Billing is enabled, skipping probe offline email notification",
);
}
}
@@ -255,17 +267,18 @@ router.post(
);
}
// this is when the resource was ingested.
probeResponse.ingestedAt = OneUptimeDate.getCurrentDate();
MonitorResourceUtil.monitorResource(probeResponse).catch((err: Error) => {
logger.error("Error in monitor resource");
logger.error(err);
});
return Response.sendJsonObjectResponse(req, res, {
// Return response immediately
Response.sendJsonObjectResponse(req, res, {
result: "processing",
});
// Add to queue for asynchronous processing
await ProbeIngestQueueService.addProbeIngestJob({
probeMonitorResponse: req.body,
jobType: "probe-response",
});
return;
} catch (err) {
return next(err);
}
@@ -303,28 +316,156 @@ router.post(
);
}
probeResponse.ingestedAt = OneUptimeDate.getCurrentDate();
// Return response immediately
Response.sendEmptySuccessResponse(req, res);
// save the probe response to the monitor test.
// Add to queue for asynchronous processing
await ProbeIngestQueueService.addProbeIngestJob({
probeMonitorResponse: req.body,
jobType: "monitor-test",
testId: testId.toString(),
});
await MonitorTestService.updateOneById({
id: testId,
data: {
monitorStepProbeResponse: {
[probeResponse.monitorStepId.toString()]: {
...JSON.parse(JSON.stringify(probeResponse)),
monitoredAt: OneUptimeDate.getCurrentDate(),
},
} as any,
return;
} catch (err) {
return next(err);
}
},
);
// Queue stats endpoint
router.get(
"/probe/queue/stats",
ClusterKeyAuthorization.isAuthorizedServiceMiddleware,
async (
req: ExpressRequest,
res: ExpressResponse,
next: NextFunction,
): Promise<void> => {
try {
const stats: {
waiting: number;
active: number;
completed: number;
failed: number;
delayed: number;
total: number;
} = await ProbeIngestQueueService.getQueueStats();
return Response.sendJsonObjectResponse(req, res, stats);
} catch (err) {
return next(err);
}
},
);
// Queue size endpoint
router.get(
"/probe/queue/size",
ClusterKeyAuthorization.isAuthorizedServiceMiddleware,
async (
req: ExpressRequest,
res: ExpressResponse,
next: NextFunction,
): Promise<void> => {
try {
const size: number = await ProbeIngestQueueService.getQueueSize();
return Response.sendJsonObjectResponse(req, res, { size });
} catch (err) {
return next(err);
}
},
);
// Queue size endpoint for Keda autoscaling (returns pending monitors count for specific probe)
router.post(
"/metrics/queue-size",
ProbeAuthorization.isAuthorizedServiceMiddleware,
async (
req: ExpressRequest,
res: ExpressResponse,
next: NextFunction,
): Promise<void> => {
try {
// This endpoint returns the number of monitors pending for the specific probe
// to be used by Keda for autoscaling probe replicas
// Get the probe ID from the authenticated request
const data: JSONObject = req.body;
const probeId: ObjectID = new ObjectID(data["probeId"] as string);
if (!probeId) {
return Response.sendErrorResponse(
req,
res,
new BadDataException("Probe ID not found"),
);
}
// Get pending monitor count for this specific probe
const pendingCount: PositiveNumber = await MonitorProbeService.countBy({
query: {
probeId: probeId,
isEnabled: true,
nextPingAt: QueryHelper.lessThanEqualToOrNull(
OneUptimeDate.getSomeMinutesAgo(2),
),
monitor: {
...MonitorService.getEnabledMonitorQuery(),
},
project: {
...ProjectService.getActiveProjectStatusQuery(),
},
},
props: {
isRoot: true,
},
});
// send success response.
return Response.sendJsonObjectResponse(req, res, {
queueSize: pendingCount.toNumber(),
});
} catch (err) {
return next(err);
}
},
);
return Response.sendEmptySuccessResponse(req, res);
// Queue failed jobs endpoint
router.get(
"/probe/queue/failed",
ClusterKeyAuthorization.isAuthorizedServiceMiddleware,
async (
req: ExpressRequest,
res: ExpressResponse,
next: NextFunction,
): Promise<void> => {
try {
// Parse pagination parameters from query string
const start: number = parseInt(req.query["start"] as string) || 0;
const end: number = parseInt(req.query["end"] as string) || 100;
const failedJobs: Array<{
id: string;
name: string;
data: any;
failedReason: string;
stackTrace?: string;
processedOn: Date | null;
finishedOn: Date | null;
attemptsMade: number;
}> = await ProbeIngestQueueService.getFailedJobs({
start,
end,
});
return Response.sendJsonObjectResponse(req, res, {
failedJobs,
pagination: {
start,
end,
count: failedJobs.length,
},
});
} catch (err) {
return next(err);
}

View File

@@ -1,6 +1,7 @@
import MonitorAPI from "./API/Monitor";
import ProbeIngest from "./API/Probe";
import RegisterAPI from "./API/Register";
import MetricsAPI from "./API/Metrics";
import { PromiseVoidFunction } from "Common/Types/FunctionTypes";
import { ClickhouseAppInstance } from "Common/Server/Infrastructure/ClickhouseDatabase";
import PostgresAppInstance from "Common/Server/Infrastructure/PostgresDatabase";
@@ -11,6 +12,7 @@ import logger from "Common/Server/Utils/Logger";
import Realtime from "Common/Server/Utils/Realtime";
import App from "Common/Server/Utils/StartServer";
import Telemetry from "Common/Server/Utils/Telemetry";
import "./Jobs/ProbeIngest/ProcessProbeIngest";
import "ejs";
const app: ExpressApplication = Express.getExpressApp();
@@ -21,6 +23,7 @@ const APP_NAME: string = "probe-ingest";
app.use([`/${APP_NAME}`, "/ingestor", "/"], RegisterAPI);
app.use([`/${APP_NAME}`, "/ingestor", "/"], MonitorAPI);
app.use([`/${APP_NAME}`, "/ingestor", "/"], ProbeIngest);
app.use([`/${APP_NAME}`, "/"], MetricsAPI);
const init: PromiseVoidFunction = async (): Promise<void> => {
try {

View File

@@ -0,0 +1,82 @@
import { ProbeIngestJobData } from "../../Services/Queue/ProbeIngestQueueService";
import logger from "Common/Server/Utils/Logger";
import { QueueJob, QueueName } from "Common/Server/Infrastructure/Queue";
import QueueWorker from "Common/Server/Infrastructure/QueueWorker";
import BadDataException from "Common/Types/Exception/BadDataException";
import JSONFunctions from "Common/Types/JSONFunctions";
import ObjectID from "Common/Types/ObjectID";
import MonitorResourceUtil from "Common/Server/Utils/Monitor/MonitorResource";
import OneUptimeDate from "Common/Types/Date";
import MonitorTestService from "Common/Server/Services/MonitorTestService";
import ProbeMonitorResponse from "Common/Types/Probe/ProbeMonitorResponse";
import { JSONObject } from "Common/Types/JSON";
// Set up the worker for processing probe ingest queue
// Each job payload is the ProbeIngestJobData enqueued by
// ProbeIngestQueueService.addProbeIngestJob.
QueueWorker.getWorker(
  QueueName.ProbeIngest,
  async (job: QueueJob): Promise<void> => {
    logger.debug(`Processing probe ingestion job: ${job.name}`);
    try {
      // job.data round-trips through JSON; cast back to the typed payload.
      const jobData: ProbeIngestJobData = job.data as ProbeIngestJobData;
      await processProbeFromQueue(jobData);
      logger.debug(`Successfully processed probe ingestion job: ${job.name}`);
    } catch (error) {
      logger.error(`Error processing probe ingestion job:`);
      logger.error(error);
      // Rethrow so the queue infrastructure records the failure.
      throw error;
    }
  },
  { concurrency: 20 }, // Process up to 20 probe ingest jobs concurrently
);
/**
 * Processes one queued probe ingest job.
 *
 * Two job types are supported:
 *  - "probe-response": forwards the probe's monitor response to the
 *    monitor resource processor.
 *  - "monitor-test" (with a testId): stores the probe response on the
 *    corresponding MonitorTest record instead.
 *
 * @throws BadDataException when the payload has no probe response or the
 *         job type is unrecognized.
 */
async function processProbeFromQueue(
  jobData: ProbeIngestJobData,
): Promise<void> {
  const probeResponse: ProbeMonitorResponse = JSONFunctions.deserialize(
    jobData.probeMonitorResponse["probeMonitorResponse"] as JSONObject,
  ) as unknown as ProbeMonitorResponse;

  if (!probeResponse) {
    throw new BadDataException("ProbeMonitorResponse not found");
  }

  // This is when the resource was ingested. Stamp it once, here — the
  // original code re-assigned it a second time inside the monitor-test
  // branch, which was redundant.
  probeResponse.ingestedAt = OneUptimeDate.getCurrentDate();

  if (jobData.jobType === "probe-response") {
    // Handle regular probe response
    await MonitorResourceUtil.monitorResource(probeResponse);
  } else if (jobData.jobType === "monitor-test" && jobData.testId) {
    // Handle monitor test response. jobData.testId was already verified
    // truthy above; an `if (!new ObjectID(...))` re-check would be dead
    // code, since an object instance is always truthy.
    const testId: ObjectID = new ObjectID(jobData.testId);

    // save the probe response to the monitor test.
    await MonitorTestService.updateOneById({
      id: testId,
      data: {
        monitorStepProbeResponse: {
          [probeResponse.monitorStepId.toString()]: {
            // Deep-copy via JSON so a plain object (not a live class
            // instance) is persisted on the test record.
            ...JSON.parse(JSON.stringify(probeResponse)),
            monitoredAt: OneUptimeDate.getCurrentDate(),
          },
        } as any,
      },
      props: {
        isRoot: true,
      },
    });
  } else {
    throw new BadDataException(`Invalid job type: ${jobData.jobType}`);
  }
}

logger.debug("Probe ingest worker initialized");

View File

@@ -0,0 +1,76 @@
import Queue, { QueueName } from "Common/Server/Infrastructure/Queue";
import { JSONObject } from "Common/Types/JSON";
import OneUptimeDate from "Common/Types/Date";
import logger from "Common/Server/Utils/Logger";
/** Payload stored on every probe ingest queue job. */
export interface ProbeIngestJobData {
  probeMonitorResponse: JSONObject;
  jobType: "probe-response" | "monitor-test";
  testId?: string | undefined;
  ingestionTimestamp: Date;
}

/**
 * Wrapper around the shared Queue infrastructure for the probe ingest
 * queue: enqueues probe responses and exposes queue introspection.
 */
export default class ProbeIngestQueueService {
  /**
   * Enqueues a probe response (or monitor-test response) for asynchronous
   * processing. The ingestion timestamp is stamped at enqueue time.
   */
  public static async addProbeIngestJob(data: {
    probeMonitorResponse: JSONObject;
    jobType: "probe-response" | "monitor-test";
    testId?: string;
  }): Promise<void> {
    try {
      const payload: ProbeIngestJobData = {
        probeMonitorResponse: data.probeMonitorResponse,
        jobType: data.jobType,
        testId: data.testId,
        ingestionTimestamp: OneUptimeDate.getCurrentDate(),
      };

      // Job id: job type, test id (or "general"), nanosecond timestamp.
      const jobId: string = `probe-${data.jobType}-${data.testId || "general"}-${OneUptimeDate.getCurrentDateAsUnixNano()}`;

      await Queue.addJob(
        QueueName.ProbeIngest,
        jobId,
        "ProcessProbeIngest",
        payload as unknown as JSONObject,
      );

      logger.debug(`Added probe ingestion job: ${jobId}`);
    } catch (error) {
      logger.error(`Error adding probe ingestion job:`);
      logger.error(error);
      throw error;
    }
  }

  /** Total number of jobs currently in the queue. */
  public static async getQueueSize(): Promise<number> {
    return Queue.getQueueSize(QueueName.ProbeIngest);
  }

  /** Per-state job counts for the queue. */
  public static async getQueueStats(): Promise<{
    waiting: number;
    active: number;
    completed: number;
    failed: number;
    delayed: number;
    total: number;
  }> {
    return Queue.getQueueStats(QueueName.ProbeIngest);
  }

  /** Failed jobs, optionally windowed by start/end offsets. */
  public static getFailedJobs(options?: {
    start?: number;
    end?: number;
  }): Promise<
    Array<{
      id: string;
      name: string;
      data: JSONObject;
      failedReason: string;
      stackTrace?: string;
      processedOn: Date | null;
      finishedOn: Date | null;
      attemptsMade: number;
    }>
  > {
    return Queue.getFailedJobs(QueueName.ProbeIngest, options);
  }
}

View File

@@ -0,0 +1,38 @@
import Express, {
  ExpressRequest,
  ExpressResponse,
  ExpressRouter,
  NextFunction,
} from "Common/Server/Utils/Express";
import ServerMonitorIngestQueueService from "../Services/Queue/ServerMonitorIngestQueueService";
// import ClusterKeyAuthorization from "Common/Server/Middleware/ClusterKeyAuthorization";

const router: ExpressRouter = Express.getRouter();

/**
 * JSON metrics endpoint for KEDA autoscaling.
 * Exposes the server-monitor ingest queue depth as JSON for the KEDA
 * metrics-api scaler.
 */
router.get(
  "/metrics/queue-size",
  // ClusterKeyAuthorization.isAuthorizedServiceMiddleware, // Temporarily disabled for KEDA debugging
  async (
    _req: ExpressRequest,
    res: ExpressResponse,
    next: NextFunction,
  ): Promise<void> => {
    try {
      const depth: number =
        await ServerMonitorIngestQueueService.getQueueSize();
      res.setHeader("Content-Type", "application/json");
      res.status(200).json({ queueSize: depth });
    } catch (err) {
      return next(err);
    }
  },
);

export default router;

View File

@@ -1,8 +1,6 @@
import BadDataException from "Common/Types/Exception/BadDataException";
import { JSONObject } from "Common/Types/JSON";
import JSONFunctions from "Common/Types/JSONFunctions";
import MonitorType from "Common/Types/Monitor/MonitorType";
import ServerMonitorResponse from "Common/Types/Monitor/ServerMonitor/ServerMonitorResponse";
import ObjectID from "Common/Types/ObjectID";
import MonitorService from "Common/Server/Services/MonitorService";
import Express, {
@@ -11,11 +9,11 @@ import Express, {
ExpressRouter,
NextFunction,
} from "Common/Server/Utils/Express";
import MonitorResourceUtil from "Common/Server/Utils/Monitor/MonitorResource";
import Response from "Common/Server/Utils/Response";
import Monitor from "Common/Models/DatabaseModels/Monitor";
import OneUptimeDate from "Common/Types/Date";
import ProjectService from "Common/Server/Services/ProjectService";
import ServerMonitorIngestQueueService from "../Services/Queue/ServerMonitorIngestQueueService";
import ClusterKeyAuthorization from "Common/Server/Middleware/ClusterKeyAuthorization";
const router: ExpressRouter = Express.getRouter();
@@ -77,52 +75,101 @@ router.post(
throw new BadDataException("Invalid Secret Key");
}
const monitor: Monitor | null = await MonitorService.findOneBy({
query: {
serverMonitorSecretKey: new ObjectID(monitorSecretKeyAsString),
monitorType: MonitorType.Server,
...MonitorService.getEnabledMonitorQuery(),
project: {
...ProjectService.getActiveProjectStatusQuery(),
},
},
select: {
_id: true,
},
props: {
isRoot: true,
},
});
if (!monitor) {
throw new BadDataException("Monitor not found");
}
// return the response early.
Response.sendEmptySuccessResponse(req, res);
// now process this request.
// Add to queue for asynchronous processing
await ServerMonitorIngestQueueService.addServerMonitorIngestJob({
secretKey: monitorSecretKeyAsString,
serverMonitorResponse: req.body as JSONObject,
});
const serverMonitorResponse: ServerMonitorResponse =
JSONFunctions.deserialize(
req.body["serverMonitorResponse"] as JSONObject,
) as any;
return;
} catch (err) {
return next(err);
}
},
);
if (!serverMonitorResponse) {
throw new BadDataException("Invalid Server Monitor Response");
}
// Queue stats endpoint
router.get(
"/server-monitor/queue/stats",
ClusterKeyAuthorization.isAuthorizedServiceMiddleware,
async (
req: ExpressRequest,
res: ExpressResponse,
next: NextFunction,
): Promise<void> => {
try {
const stats: {
waiting: number;
active: number;
completed: number;
failed: number;
delayed: number;
total: number;
} = await ServerMonitorIngestQueueService.getQueueStats();
return Response.sendJsonObjectResponse(req, res, stats);
} catch (err) {
return next(err);
}
},
);
if (!monitor.id) {
throw new BadDataException("Monitor id not found");
}
// Reports the current number of jobs sitting in the server-monitor ingest
// queue. Guarded by the cluster key so only internal services can query it.
router.get(
  "/server-monitor/queue/size",
  ClusterKeyAuthorization.isAuthorizedServiceMiddleware,
  async (
    req: ExpressRequest,
    res: ExpressResponse,
    next: NextFunction,
  ): Promise<void> => {
    try {
      const queueSize: number =
        await ServerMonitorIngestQueueService.getQueueSize();
      return Response.sendJsonObjectResponse(req, res, { size: queueSize });
    } catch (err) {
      return next(err);
    }
  },
);
serverMonitorResponse.monitorId = monitor.id;
// Queue failed jobs endpoint
router.get(
"/server-monitor/queue/failed",
ClusterKeyAuthorization.isAuthorizedServiceMiddleware,
async (
req: ExpressRequest,
res: ExpressResponse,
next: NextFunction,
): Promise<void> => {
try {
// Parse pagination parameters from query string
const start: number = parseInt(req.query["start"] as string) || 0;
const end: number = parseInt(req.query["end"] as string) || 100;
serverMonitorResponse.requestReceivedAt = OneUptimeDate.getCurrentDate();
serverMonitorResponse.timeNow = OneUptimeDate.getCurrentDate();
const failedJobs: Array<{
id: string;
name: string;
data: any;
failedReason: string;
stackTrace?: string;
processedOn: Date | null;
finishedOn: Date | null;
attemptsMade: number;
}> = await ServerMonitorIngestQueueService.getFailedJobs({
start,
end,
});
// process probe response here.
await MonitorResourceUtil.monitorResource(serverMonitorResponse);
return Response.sendJsonObjectResponse(req, res, {
failedJobs,
pagination: {
start,
end,
count: failedJobs.length,
},
});
} catch (err) {
return next(err);
}

View File

@@ -1,4 +1,5 @@
import ServerMonitorAPI from "./API/ServerMonitor";
import MetricsAPI from "./API/Metrics";
import { PromiseVoidFunction } from "Common/Types/FunctionTypes";
import { ClickhouseAppInstance } from "Common/Server/Infrastructure/ClickhouseDatabase";
import PostgresAppInstance from "Common/Server/Infrastructure/PostgresDatabase";
@@ -9,12 +10,14 @@ import logger from "Common/Server/Utils/Logger";
import Realtime from "Common/Server/Utils/Realtime";
import App from "Common/Server/Utils/StartServer";
import Telemetry from "Common/Server/Utils/Telemetry";
import "./Jobs/ServerMonitorIngest/ProcessServerMonitorIngest";
const app: ExpressApplication = Express.getExpressApp();
const APP_NAME: string = "server-monitor-ingest";
app.use([`/${APP_NAME}`, "/"], ServerMonitorAPI);
app.use([`/${APP_NAME}`, "/"], MetricsAPI);
const init: PromiseVoidFunction = async (): Promise<void> => {
try {

View File

@@ -0,0 +1,92 @@
import { ServerMonitorIngestJobData } from "../../Services/Queue/ServerMonitorIngestQueueService";
import logger from "Common/Server/Utils/Logger";
import { QueueJob, QueueName } from "Common/Server/Infrastructure/Queue";
import QueueWorker from "Common/Server/Infrastructure/QueueWorker";
import BadDataException from "Common/Types/Exception/BadDataException";
import { JSONObject } from "Common/Types/JSON";
import JSONFunctions from "Common/Types/JSONFunctions";
import MonitorType from "Common/Types/Monitor/MonitorType";
import ServerMonitorResponse from "Common/Types/Monitor/ServerMonitor/ServerMonitorResponse";
import ObjectID from "Common/Types/ObjectID";
import MonitorService from "Common/Server/Services/MonitorService";
import MonitorResourceUtil from "Common/Server/Utils/Monitor/MonitorResource";
import Monitor from "Common/Models/DatabaseModels/Monitor";
import OneUptimeDate from "Common/Types/Date";
import ProjectService from "Common/Server/Services/ProjectService";
// Registers the queue worker that drains the server-monitor ingest queue.
// Each job's payload is handed to processServerMonitorFromQueue; failures are
// logged and rethrown so the queue infrastructure can record/retry them.
QueueWorker.getWorker(
  QueueName.ServerMonitorIngest,
  async (job: QueueJob): Promise<void> => {
    logger.debug(`Processing server monitor ingestion job: ${job.name}`);
    try {
      const payload: ServerMonitorIngestJobData =
        job.data as ServerMonitorIngestJobData;
      await processServerMonitorFromQueue(payload);
      logger.debug(
        `Successfully processed server monitor ingestion job: ${job.name}`,
      );
    } catch (err) {
      logger.error(`Error processing server monitor ingestion job:`);
      logger.error(err);
      throw err; // rethrow so the queue marks the job failed
    }
  },
  { concurrency: 20 }, // Process up to 20 server monitor ingest jobs concurrently
);
/**
 * Processes one queued server-monitor ingest job end to end: resolves the
 * monitor by its secret key, validates the payload, stamps timing metadata
 * and forwards the response to MonitorResourceUtil for evaluation.
 *
 * @param jobData - payload enqueued by ServerMonitorIngestQueueService.
 * @throws BadDataException when the secret key, monitor, monitor id, or
 *         server monitor response is missing/invalid. The worker above logs
 *         and rethrows, so the queue records the failure.
 */
async function processServerMonitorFromQueue(
  jobData: ServerMonitorIngestJobData,
): Promise<void> {
  const monitorSecretKeyAsString: string = jobData.secretKey;

  if (!monitorSecretKeyAsString) {
    throw new BadDataException("Invalid Secret Key");
  }

  // Only enabled Server-type monitors in active projects may ingest data.
  const monitor: Monitor | null = await MonitorService.findOneBy({
    query: {
      serverMonitorSecretKey: new ObjectID(monitorSecretKeyAsString),
      monitorType: MonitorType.Server,
      ...MonitorService.getEnabledMonitorQuery(),
      project: {
        ...ProjectService.getActiveProjectStatusQuery(),
      },
    },
    select: {
      _id: true,
    },
    props: {
      isRoot: true,
    },
  });

  if (!monitor) {
    throw new BadDataException("Monitor not found");
  }

  // The queued payload is the raw request body; the agent nests the actual
  // response under the "serverMonitorResponse" key.
  const serverMonitorResponse: ServerMonitorResponse =
    JSONFunctions.deserialize(
      jobData.serverMonitorResponse["serverMonitorResponse"] as JSONObject,
    ) as unknown as ServerMonitorResponse;

  if (!serverMonitorResponse) {
    throw new BadDataException("Invalid Server Monitor Response");
  }

  if (!monitor.id) {
    throw new BadDataException("Monitor id not found");
  }

  serverMonitorResponse.monitorId = monitor.id;

  // Use the time the request was originally accepted (recorded when the job
  // was enqueued) rather than the time the worker happens to run — otherwise
  // queue latency skews requestReceivedAt. Fall back to "now" for legacy jobs
  // enqueued without a timestamp. NOTE(review): ingestionTimestamp round-trips
  // through the queue as JSON, so it may arrive as a string — convert it.
  serverMonitorResponse.requestReceivedAt = jobData.ingestionTimestamp
    ? OneUptimeDate.fromString(jobData.ingestionTimestamp as unknown as string)
    : OneUptimeDate.getCurrentDate();
  serverMonitorResponse.timeNow = OneUptimeDate.getCurrentDate();

  // process probe response here.
  await MonitorResourceUtil.monitorResource(serverMonitorResponse);
}
logger.debug("Server monitor ingest worker initialized");

View File

@@ -0,0 +1,73 @@
import crypto from "node:crypto";

import Queue, { QueueName } from "Common/Server/Infrastructure/Queue";
import OneUptimeDate from "Common/Types/Date";
import { JSONObject } from "Common/Types/JSON";
import logger from "Common/Server/Utils/Logger";
// Payload enqueued for each server-monitor ingest job and consumed by the
// ProcessServerMonitorIngest worker.
export interface ServerMonitorIngestJobData {
  secretKey: string; // Monitor's server-monitor secret key; identifies the monitor.
  serverMonitorResponse: JSONObject; // Raw request body as received from the agent.
  ingestionTimestamp: Date; // When the request was accepted and the job enqueued.
}
/**
 * Thin service wrapper around the shared Queue infrastructure for the
 * server-monitor ingest queue: enqueues ingest jobs and exposes
 * size/stats/failed-job introspection for the monitoring endpoints.
 */
export default class ServerMonitorIngestQueueService {
  /**
   * Enqueues one server-monitor ingest job for asynchronous processing.
   * Records the ingestion timestamp so the worker can use the true
   * received-at time.
   *
   * @throws rethrows any error from the underlying queue after logging it.
   */
  public static async addServerMonitorIngestJob(data: {
    secretKey: string;
    serverMonitorResponse: JSONObject;
  }): Promise<void> {
    try {
      const jobData: ServerMonitorIngestJobData = {
        secretKey: data.secretKey,
        serverMonitorResponse: data.serverMonitorResponse,
        ingestionTimestamp: OneUptimeDate.getCurrentDate(),
      };

      // SECURITY: do not embed the raw secret key in the job id — job ids are
      // visible in queue dashboards and are written to the debug log below.
      // Use a short digest of the key instead (still groups jobs per monitor).
      const keyDigest: string = crypto
        .createHash("sha256")
        .update(data.secretKey)
        .digest("hex")
        .substring(0, 12);

      const jobId: string = `server-monitor-${keyDigest}-${OneUptimeDate.getCurrentDateAsUnixNano()}`;

      await Queue.addJob(
        QueueName.ServerMonitorIngest,
        jobId,
        "ProcessServerMonitorIngest",
        jobData as unknown as JSONObject,
      );

      logger.debug(`Added server monitor ingestion job: ${jobId}`);
    } catch (error) {
      logger.error(`Error adding server monitor ingestion job:`);
      logger.error(error);
      throw error;
    }
  }

  // Current number of jobs in the ingest queue.
  public static async getQueueSize(): Promise<number> {
    return Queue.getQueueSize(QueueName.ServerMonitorIngest);
  }

  // Aggregate per-state job counters for the ingest queue.
  public static async getQueueStats(): Promise<{
    waiting: number;
    active: number;
    completed: number;
    failed: number;
    delayed: number;
    total: number;
  }> {
    return Queue.getQueueStats(QueueName.ServerMonitorIngest);
  }

  // Page of failed jobs ([start, end], defaults decided by the Queue layer)
  // for debugging/inspection via the /queue/failed endpoint.
  public static getFailedJobs(options?: {
    start?: number;
    end?: number;
  }): Promise<
    Array<{
      id: string;
      name: string;
      data: JSONObject;
      failedReason: string;
      stackTrace?: string;
      processedOn: Date | null;
      finishedOn: Date | null;
      attemptsMade: number;
    }>
  > {
    return Queue.getFailedJobs(QueueName.ServerMonitorIngest, options);
  }
}