Add utility classes for telemetry: Monitor, StackTrace, and Syslog parsing

- Implemented MonitorUtil for managing monitor secrets and populating them in monitor steps and tests.
- Created StackTraceParser to parse and structure stack traces from various programming languages.
- Developed SyslogParser to handle and parse syslog messages in both RFC 5424 and RFC 3164 formats.
This commit is contained in:
Nawaz Dhandala
2026-04-02 14:04:13 +01:00
parent 69c6b332c1
commit 5f398bdb31
99 changed files with 125 additions and 8756 deletions

View File

@@ -106,29 +106,6 @@ jobs:
max_attempts: 3
command: sudo docker build --no-cache -f ./Probe/Dockerfile .
docker-build-telemetry:
runs-on: ubuntu-latest
env:
CI_PIPELINE_ID: ${{github.run_number}}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Preinstall
uses: nick-fields/retry@v3
with:
timeout_minutes: 10
max_attempts: 3
command: npm run prerun
# build image probe api
- name: build docker image
uses: nick-fields/retry@v3
with:
timeout_minutes: 45
max_attempts: 3
command: sudo docker build --no-cache -f ./Telemetry/Dockerfile .
docker-build-test-server:
runs-on: ubuntu-latest
env:

View File

@@ -184,24 +184,6 @@ jobs:
max_attempts: 3
command: cd Probe && npm install && npm run compile && npm run dep-check
compile-telemetry:
runs-on: ubuntu-latest
env:
CI_PIPELINE_ID: ${{github.run_number}}
steps:
- uses: actions/checkout@v4
- uses: actions/setup-node@v4
with:
node-version: latest
- run: cd Common && npm install
- name: Compile Telemetry
uses: nick-fields/retry@v3
with:
timeout_minutes: 30
max_attempts: 3
command: cd Telemetry && npm install && npm run compile && npm run dep-check
compile-status-page:
runs-on: ubuntu-latest
env:

View File

@@ -569,88 +569,6 @@ jobs:
--image test \
--tags "${SANITIZED_VERSION},enterprise-${SANITIZED_VERSION}"
telemetry-docker-image-build:
needs: [generate-build-number, read-version]
strategy:
matrix:
include:
- platform: linux/amd64
runner: ubuntu-latest
- platform: linux/arm64
runner: ubuntu-24.04-arm
runs-on: ${{ matrix.runner }}
steps:
- name: Free Disk Space (Ubuntu)
if: matrix.platform == 'linux/amd64'
uses: jlumbroso/free-disk-space@main
with:
tool-cache: false
android: true
dotnet: true
haskell: true
large-packages: true
docker-images: true
swap-storage: true
- uses: actions/checkout@v4
with:
ref: ${{ github.ref }}
- uses: actions/setup-node@v4
with:
node-version: latest
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Generate Dockerfile from Dockerfile.tpl
run: npm run prerun
- name: Login to Docker Hub
run: |
echo "${{ secrets.DOCKERHUB_PASSWORD }}" | docker login --username "${{ secrets.DOCKERHUB_USERNAME }}" --password-stdin
- name: Login to GitHub Container Registry
run: |
echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io --username "${{ github.repository_owner }}" --password-stdin
- name: Build and push
run: |
bash ./Scripts/GHA/build_docker_images.sh \
--image telemetry \
--version "${{needs.read-version.outputs.major_minor}}" \
--dockerfile ./Telemetry/Dockerfile \
--context . \
--platforms ${{ matrix.platform }} \
--git-sha "${{ github.sha }}"
telemetry-docker-image-merge:
needs: [telemetry-docker-image-build, read-version]
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
ref: ${{ github.ref }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to Docker Hub
run: |
echo "${{ secrets.DOCKERHUB_PASSWORD }}" | docker login --username "${{ secrets.DOCKERHUB_USERNAME }}" --password-stdin
- name: Login to GitHub Container Registry
run: |
echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io --username "${{ github.repository_owner }}" --password-stdin
- name: Merge multi-arch manifests
run: |
VERSION="${{needs.read-version.outputs.major_minor}}"
SANITIZED_VERSION="${VERSION//+/-}"
bash ./Scripts/GHA/merge_docker_manifests.sh \
--image telemetry \
--tags "${SANITIZED_VERSION},enterprise-${SANITIZED_VERSION}"
probe-docker-image-build:
needs: [generate-build-number, read-version]
strategy:
@@ -992,7 +910,6 @@ jobs:
- home-docker-image-merge
- test-server-docker-image-merge
- test-docker-image-merge
- telemetry-docker-image-merge
- probe-docker-image-merge
- app-docker-image-merge
- ai-agent-docker-image-merge
@@ -1008,7 +925,6 @@ jobs:
"home",
"test-server",
"test",
"telemetry",
"probe",
"app",
"ai-agent"
@@ -1057,7 +973,7 @@ jobs:
test-e2e-release-saas:
runs-on: ubuntu-latest
needs: [telemetry-docker-image-merge, ai-agent-docker-image-merge, app-docker-image-merge, home-docker-image-merge, probe-docker-image-merge, test-docker-image-merge, test-server-docker-image-merge, publish-npm-packages, e2e-docker-image-merge, helm-chart-deploy, generate-build-number, read-version, nginx-docker-image-merge]
needs: [ai-agent-docker-image-merge, app-docker-image-merge, home-docker-image-merge, probe-docker-image-merge, test-docker-image-merge, test-server-docker-image-merge, publish-npm-packages, e2e-docker-image-merge, helm-chart-deploy, generate-build-number, read-version, nginx-docker-image-merge]
env:
CI_PIPELINE_ID: ${{github.run_number}}
steps:
@@ -1188,7 +1104,7 @@ jobs:
test-e2e-release-self-hosted:
runs-on: ubuntu-latest
# After all the jobs runs
needs: [telemetry-docker-image-merge, ai-agent-docker-image-merge, app-docker-image-merge, home-docker-image-merge, probe-docker-image-merge, test-docker-image-merge, test-server-docker-image-merge, publish-npm-packages, e2e-docker-image-merge, helm-chart-deploy, generate-build-number, read-version, nginx-docker-image-merge]
needs: [ai-agent-docker-image-merge, app-docker-image-merge, home-docker-image-merge, probe-docker-image-merge, test-docker-image-merge, test-server-docker-image-merge, publish-npm-packages, e2e-docker-image-merge, helm-chart-deploy, generate-build-number, read-version, nginx-docker-image-merge]
env:
CI_PIPELINE_ID: ${{github.run_number}}
steps:

View File

@@ -514,90 +514,6 @@ jobs:
--image test \
--tags "${SANITIZED_VERSION},test,enterprise-${SANITIZED_VERSION},enterprise-test"
telemetry-docker-image-build:
needs: [read-version, generate-build-number]
strategy:
matrix:
include:
- platform: linux/amd64
runner: ubuntu-latest
- platform: linux/arm64
runner: ubuntu-24.04-arm
runs-on: ${{ matrix.runner }}
steps:
- name: Free Disk Space (Ubuntu)
if: matrix.platform == 'linux/amd64'
uses: jlumbroso/free-disk-space@main
with:
tool-cache: false
android: true
dotnet: true
haskell: true
large-packages: true
docker-images: true
swap-storage: true
- uses: actions/checkout@v4
with:
ref: ${{ github.ref }}
- uses: actions/setup-node@v4
with:
node-version: latest
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Generate Dockerfile from Dockerfile.tpl
run: npm run prerun
- name: Login to Docker Hub
run: |
echo "${{ secrets.DOCKERHUB_PASSWORD }}" | docker login --username "${{ secrets.DOCKERHUB_USERNAME }}" --password-stdin
- name: Login to GitHub Container Registry
run: |
echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io --username "${{ github.repository_owner }}" --password-stdin
- name: Build and push
run: |
bash ./Scripts/GHA/build_docker_images.sh \
--image telemetry \
--version "${{needs.read-version.outputs.major_minor}}-test" \
--dockerfile ./Telemetry/Dockerfile \
--context . \
--platforms ${{ matrix.platform }} \
--git-sha "${{ github.sha }}" \
--extra-tags test \
--extra-enterprise-tags enterprise-test
telemetry-docker-image-merge:
needs: [telemetry-docker-image-build, read-version]
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
ref: ${{ github.ref }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to Docker Hub
run: |
echo "${{ secrets.DOCKERHUB_PASSWORD }}" | docker login --username "${{ secrets.DOCKERHUB_USERNAME }}" --password-stdin
- name: Login to GitHub Container Registry
run: |
echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io --username "${{ github.repository_owner }}" --password-stdin
- name: Merge multi-arch manifests
run: |
VERSION="${{needs.read-version.outputs.major_minor}}-test"
SANITIZED_VERSION="${VERSION//+/-}"
bash ./Scripts/GHA/merge_docker_manifests.sh \
--image telemetry \
--tags "${SANITIZED_VERSION},test,enterprise-${SANITIZED_VERSION},enterprise-test"
probe-docker-image-build:
needs: [read-version, generate-build-number]
strategy:
@@ -867,7 +783,7 @@ jobs:
test-helm-chart:
runs-on: ubuntu-latest
needs: [infrastructure-agent-deploy, publish-terraform-provider, telemetry-docker-image-merge, home-docker-image-merge, test-server-docker-image-merge, test-docker-image-merge, probe-docker-image-merge, app-docker-image-merge, ai-agent-docker-image-merge, nginx-docker-image-merge, e2e-docker-image-merge]
needs: [infrastructure-agent-deploy, publish-terraform-provider, home-docker-image-merge, test-server-docker-image-merge, test-docker-image-merge, probe-docker-image-merge, app-docker-image-merge, ai-agent-docker-image-merge, nginx-docker-image-merge, e2e-docker-image-merge]
env:
CI_PIPELINE_ID: ${{github.run_number}}
steps:

View File

@@ -1,22 +0,0 @@
name: Telemetry Test
on:
pull_request:
push:
branches-ignore:
- 'hotfix-*' # excludes hotfix branches
- 'release'
jobs:
test:
runs-on: ubuntu-latest
env:
CI_PIPELINE_ID: ${{github.run_number}}
steps:
- uses: actions/checkout@v4
- uses: actions/setup-node@v4
with:
node-version: latest
- run: cd Common && npm install
- run: cd Telemetry && npm install && npm run test

View File

@@ -0,0 +1,77 @@
import OTelIngestAPI from "./API/OTelIngest";
import MetricsAPI from "./API/Metrics";
import SyslogAPI from "./API/Syslog";
import FluentAPI from "./API/Fluent";
import PyroscopeAPI from "./API/Pyroscope";
// ProbeIngest routes
import ProbeIngestRegisterAPI from "./API/ProbeIngest/Register";
import ProbeIngestMonitorAPI from "./API/ProbeIngest/Monitor";
import ProbeIngestAPI from "./API/ProbeIngest/Probe";
import IncomingEmailAPI from "./API/ProbeIngest/IncomingEmail";
// ServerMonitorIngest routes
import ServerMonitorAPI from "./API/ServerMonitorIngest/ServerMonitor";
// IncomingRequestIngest routes
import IncomingRequestAPI from "./API/IncomingRequestIngest/IncomingRequest";
import "./Jobs/TelemetryIngest/ProcessTelemetry";
import { TELEMETRY_CONCURRENCY } from "./Config";
import { startGrpcServer } from "./GrpcServer";
import FeatureSet from "Common/Server/Types/FeatureSet";
import Express, { ExpressApplication } from "Common/Server/Utils/Express";
import logger from "Common/Server/Utils/Logger";
const app: ExpressApplication = Express.getExpressApp();
const TELEMETRY_PREFIXES: Array<string> = ["/telemetry", "/"];
// Existing telemetry routes
app.use(TELEMETRY_PREFIXES, OTelIngestAPI);
app.use(TELEMETRY_PREFIXES, MetricsAPI);
app.use(TELEMETRY_PREFIXES, SyslogAPI);
app.use(TELEMETRY_PREFIXES, FluentAPI);
app.use(TELEMETRY_PREFIXES, PyroscopeAPI);
/*
* ProbeIngest routes under ["/probe-ingest", "/ingestor", "/"]
* "/ingestor" is used for backward compatibility because probes are already deployed with this path in client environments.
*/
const PROBE_INGEST_PREFIXES: Array<string> = [
"/probe-ingest",
"/ingestor",
"/",
];
app.use(PROBE_INGEST_PREFIXES, ProbeIngestRegisterAPI);
app.use(PROBE_INGEST_PREFIXES, ProbeIngestMonitorAPI);
app.use(PROBE_INGEST_PREFIXES, ProbeIngestAPI);
app.use(["/probe-ingest", "/"], IncomingEmailAPI);
// ServerMonitorIngest routes under ["/server-monitor-ingest", "/"]
const SERVER_MONITOR_PREFIXES: Array<string> = ["/server-monitor-ingest", "/"];
app.use(SERVER_MONITOR_PREFIXES, ServerMonitorAPI);
// IncomingRequestIngest routes under ["/incoming-request-ingest", "/"]
const INCOMING_REQUEST_PREFIXES: Array<string> = [
"/incoming-request-ingest",
"/",
];
app.use(INCOMING_REQUEST_PREFIXES, IncomingRequestAPI);
const TelemetryFeatureSet: FeatureSet = {
init: async (): Promise<void> => {
try {
logger.info(
`Telemetry Service - Queue concurrency: ${TELEMETRY_CONCURRENCY}`,
);
// Start gRPC OTLP server on port 4317
startGrpcServer();
} catch (err) {
logger.error("Telemetry FeatureSet Init Failed:");
logger.error(err);
throw err;
}
},
};
export default TelemetryFeatureSet;

View File

@@ -7,6 +7,7 @@ import IdentityRoutes from "./FeatureSet/Identity/Index";
import MCPRoutes from "./FeatureSet/MCP/Index";
import NotificationRoutes from "./FeatureSet/Notification/Index";
import WorkersRoutes from "./FeatureSet/Workers/Index";
import TelemetryRoutes from "./FeatureSet/Telemetry/Index";
import WorkflowRoutes from "./FeatureSet/Workflow/Index";
import { PromiseVoidFunction } from "Common/Types/FunctionTypes";
import { ClickhouseAppInstance } from "Common/Server/Infrastructure/ClickhouseDatabase";
@@ -111,6 +112,7 @@ const init: PromiseVoidFunction = async (): Promise<void> => {
await DocsRoutes.init();
await APIReferenceRoutes.init();
await WorkersRoutes.init();
await TelemetryRoutes.init();
await WorkflowRoutes.init();
// Add default routes to the app

View File

@@ -38,12 +38,15 @@
"author": "OneUptime <hello@oneuptime.com> (https://oneuptime.com/)",
"license": "Apache-2.0",
"dependencies": {
"@grpc/grpc-js": "^1.12.5",
"@grpc/proto-loader": "^0.7.13",
"@modelcontextprotocol/sdk": "^1.27.1",
"@sendgrid/mail": "^8.1.0",
"Common": "file:../Common",
"ejs": "^3.1.9",
"handlebars": "^4.7.8",
"nodemailer": "^6.9.7",
"protobufjs": "^7.3.2",
"ts-node": "^10.9.1",
"twilio": "^4.20.0",
"xml-crypto": "^3.2.0",

View File

@@ -177,8 +177,8 @@ export const AppApiHostname: Hostname = Hostname.fromString(
);
export const OpenTelemetryIngestHostname: Hostname = Hostname.fromString(
`${process.env["SERVER_TELEMETRY_HOSTNAME"] || "localhost"}:${
process.env["TELEMETRY_PORT"] || 80
`${process.env["SERVER_APP_HOSTNAME"] || "localhost"}:${
process.env["APP_PORT"] || 80
}`,
);

View File

@@ -117,13 +117,13 @@ Usage:
- name: SERVER_APP_HOSTNAME
value: {{ $.Release.Name }}-app.{{ $.Release.Namespace }}.svc.{{ $.Values.global.clusterDomain }}
- name: TELEMETRY_HOSTNAME
value: {{ $.Release.Name }}-telemetry.{{ $.Release.Namespace }}.svc.{{ $.Values.global.clusterDomain }}
value: {{ $.Release.Name }}-app.{{ $.Release.Namespace }}.svc.{{ $.Values.global.clusterDomain }}
- name: SERVER_TELEMETRY_HOSTNAME
value: {{ $.Release.Name }}-telemetry.{{ $.Release.Namespace }}.svc.{{ $.Values.global.clusterDomain }}
value: {{ $.Release.Name }}-app.{{ $.Release.Namespace }}.svc.{{ $.Values.global.clusterDomain }}
- name: APP_PORT
value: {{ $.Values.app.ports.http | squote }}
- name: TELEMETRY_PORT
value: {{ $.Values.telemetry.ports.http | squote }}
value: {{ $.Values.app.ports.http | squote }}
- name: HOME_PORT
value: {{ $.Values.home.ports.http | squote }}
- name: WORKER_CONCURRENCY

View File

@@ -118,11 +118,16 @@ spec:
value: {{ $.Values.app.disableTelemetryCollection | quote }}
- name: ENABLE_PROFILING
value: {{ $.Values.app.enableProfiling | quote }}
- name: TELEMETRY_CONCURRENCY
value: {{ $.Values.app.telemetryConcurrency | default 100 | squote }}
{{- include "oneuptime.env.registerProbeKey" (dict "Values" $.Values "Release" $.Release) | nindent 12 }}
ports:
- containerPort: {{ $.Values.app.ports.http }}
protocol: TCP
name: http
- containerPort: {{ $.Values.app.ports.grpc | default 4317 }}
protocol: TCP
name: grpc
{{- if $.Values.app.resources }}
resources:
{{- toYaml $.Values.app.resources | nindent 12 }}
@@ -141,7 +146,7 @@ spec:
{{- if and $.Values.app.enabled (not $.Values.deployment.disableDeployments) }}
# OneUptime app Service
{{- $appPorts := dict "port" $.Values.app.ports.http -}}
{{- $appPorts := dict "http" $.Values.app.ports.http "grpc" ($.Values.app.ports.grpc | default 4317) -}}
{{- $appServiceArgs := dict "ServiceName" "app" "Ports" $appPorts "Release" $.Release "Values" $.Values -}}
{{- include "oneuptime.service" $appServiceArgs }}
---

View File

@@ -2,13 +2,6 @@
KEDA ScaledObjects for various services
*/}}
{{/* Telemetry KEDA ScaledObject */}}
{{- if and .Values.keda.enabled .Values.telemetry.enabled .Values.telemetry.keda.enabled (not .Values.telemetry.disableAutoscaler) (not .Values.deployment.disableDeployments) }}
{{- $metricsConfig := dict "enabled" .Values.telemetry.keda.enabled "minReplicas" .Values.telemetry.keda.minReplicas "maxReplicas" .Values.telemetry.keda.maxReplicas "pollingInterval" .Values.telemetry.keda.pollingInterval "cooldownPeriod" .Values.telemetry.keda.cooldownPeriod "triggers" (list (dict "query" "oneuptime_telemetry_queue_size" "threshold" .Values.telemetry.keda.queueSizeThreshold "port" .Values.telemetry.ports.http)) }}
{{- $telemetryKedaArgs := dict "ServiceName" "telemetry" "Release" .Release "Values" .Values "MetricsConfig" $metricsConfig "DisableAutoscaler" .Values.telemetry.disableAutoscaler }}
{{- include "oneuptime.kedaScaledObject" $telemetryKedaArgs }}
{{- end }}
{{/* Probe KEDA ScaledObjects - one for each probe configuration */}}
{{- range $key, $val := $.Values.probes }}
{{- $probeEnabled := or (not (hasKey $val "enabled")) $val.enabled }}

View File

@@ -1 +1 @@
{{- /* OTel Collector has been removed. Telemetry ingestion (gRPC + HTTP) is now handled directly by the telemetry service. */ -}}
{{- /* OTel Collector has been removed. Telemetry ingestion (gRPC + HTTP) is now handled directly by the app service. */ -}}

View File

@@ -86,7 +86,7 @@ spec:
- name: OPENTELEMETRY_EXPORTER_OTLP_ENDPOINT
value: {{ $.Values.openTelemetryExporter.endpoint }}
- name: ONEUPTIME_URL
value: http://{{ $.Release.Name }}-telemetry.{{ $.Release.Namespace }}.svc.{{ $.Values.global.clusterDomain }}:{{ $.Values.telemetry.ports.http }}
value: http://{{ $.Release.Name }}-app.{{ $.Release.Namespace }}.svc.{{ $.Values.global.clusterDomain }}:{{ $.Values.app.ports.http }}
- name: PROBE_NAME
value: {{ $val.name }}
- name: PROBE_DESCRIPTION

View File

@@ -1,148 +0,0 @@
{{- if and $.Values.telemetry.enabled (not $.Values.deployment.disableDeployments) }}
# OneUptime telemetry Deployment
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ printf "%s-%s" $.Release.Name "telemetry" }}
namespace: {{ $.Release.Namespace }}
labels:
app: {{ printf "%s-%s" $.Release.Name "telemetry" }}
app.kubernetes.io/part-of: oneuptime
app.kubernetes.io/managed-by: Helm
appname: oneuptime
{{- if $.Values.deployment.includeTimestampLabel }}
date: "{{ now | unixEpoch }}"
{{- end }}
spec:
selector:
matchLabels:
app: {{ printf "%s-%s" $.Release.Name "telemetry" }}
{{- if $.Values.telemetry.replicaCount }}
replicas: {{ $.Values.telemetry.replicaCount }}
{{- else }}
{{- if or (not $.Values.autoscaling.enabled) ($.Values.telemetry.disableAutoscaler) }}
replicas: {{ $.Values.deployment.replicaCount }}
{{- end }}
{{- end }}
strategy: {{- toYaml $.Values.deployment.updateStrategy | nindent 4 }}
template:
metadata:
labels:
app: {{ printf "%s-%s" $.Release.Name "telemetry" }}
{{- if $.Values.deployment.includeTimestampLabel }}
date: "{{ now | unixEpoch }}"
{{- end }}
appname: oneuptime
spec:
volumes:
- name: greenlockrc
emptyDir:
sizeLimit: "1Gi"
{{- if $.Values.telemetry.podSecurityContext }}
securityContext:
{{- toYaml $.Values.telemetry.podSecurityContext | nindent 8 }}
{{- else if $.Values.podSecurityContext }}
securityContext:
{{- toYaml $.Values.podSecurityContext | nindent 8 }}
{{- end }}
{{- if $.Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml $.Values.imagePullSecrets | nindent 8 }}
{{- end }}
{{- if $.Values.affinity }}
affinity: {{- $.Values.affinity | toYaml | nindent 8 }}
{{- end }}
{{- if $.Values.tolerations }}
tolerations: {{- $.Values.tolerations | toYaml | nindent 8 }}
{{- end }}
{{- if $.Values.telemetry.nodeSelector }}
nodeSelector:
{{- toYaml $.Values.telemetry.nodeSelector | nindent 8 }}
{{- else if $.Values.nodeSelector }}
nodeSelector:
{{- toYaml $.Values.nodeSelector | nindent 8 }}
{{- end }}
containers:
- image: {{ include "oneuptime.image" (dict "Values" $.Values "ServiceName" "telemetry") }}
name: {{ printf "%s-%s" $.Release.Name "telemetry" }}
{{- if $.Values.startupProbe.enabled }}
# Startup probe
startupProbe:
httpGet:
path: /status/live
port: {{ $.Values.telemetry.ports.http }}
periodSeconds: {{ $.Values.startupProbe.periodSeconds }}
failureThreshold: {{ $.Values.startupProbe.failureThreshold }}
{{- end }}
{{- if $.Values.livenessProbe.enabled }}
# Liveness probe
livenessProbe:
httpGet:
path: /status/live
port: {{ $.Values.telemetry.ports.http }}
periodSeconds: {{ $.Values.livenessProbe.periodSeconds }}
timeoutSeconds: {{ $.Values.livenessProbe.timeoutSeconds }}
initialDelaySeconds: {{ $.Values.livenessProbe.initialDelaySeconds }}
{{- end }}
{{- if $.Values.readinessProbe.enabled }}
# Readyness Probe
readinessProbe:
httpGet:
path: /status/ready
port: {{ $.Values.telemetry.ports.http }}
periodSeconds: {{ $.Values.readinessProbe.periodSeconds }}
initialDelaySeconds: {{ $.Values.readinessProbe.initialDelaySeconds }}
timeoutSeconds: {{ $.Values.readinessProbe.timeoutSeconds }}
{{- end }}
{{- if $.Values.telemetry.containerSecurityContext }}
securityContext:
{{- toYaml $.Values.telemetry.containerSecurityContext | nindent 12 }}
{{- else if $.Values.containerSecurityContext }}
securityContext:
{{- toYaml $.Values.containerSecurityContext | nindent 12 }}
{{- end }}
imagePullPolicy: {{ $.Values.image.pullPolicy }}
env:
{{- include "oneuptime.env.common" . | nindent 12 }}
{{- include "oneuptime.env.runtime" (dict "Values" $.Values "Release" $.Release) | nindent 12 }}
- name: PORT
value: {{ $.Values.telemetry.ports.http | quote }}
- name: DISABLE_TELEMETRY
value: {{ $.Values.telemetry.disableTelemetryCollection | quote }}
- name: ENABLE_PROFILING
value: {{ $.Values.telemetry.enableProfiling | quote }}
- name: TELEMETRY_CONCURRENCY
value: {{ $.Values.telemetry.concurrency | squote }}
{{- include "oneuptime.env.registerProbeKey" (dict "Values" $.Values "Release" $.Release) | nindent 12 }}
ports:
- containerPort: {{ $.Values.telemetry.ports.http }}
protocol: TCP
name: http
- containerPort: {{ $.Values.telemetry.ports.grpc }}
protocol: TCP
name: grpc
{{- if $.Values.telemetry.resources }}
resources:
{{- toYaml $.Values.telemetry.resources | nindent 12 }}
{{- end }}
restartPolicy: {{ $.Values.image.restartPolicy }}
---
# OneUptime telemetry autoscaler
{{- if and (not $.Values.telemetry.disableAutoscaler) (not (and $.Values.keda.enabled $.Values.telemetry.keda.enabled)) }}
{{- $telemetryAutoScalerArgs := dict "ServiceName" "telemetry" "Release" $.Release "Values" $.Values -}}
{{- include "oneuptime.autoscaler" $telemetryAutoScalerArgs }}
{{- end }}
{{- end }}
---
{{- if and $.Values.telemetry.enabled (not $.Values.deployment.disableDeployments) }}
# OneUptime telemetry Service
{{- $telemetryPorts := dict "http" $.Values.telemetry.ports.http "grpc" $.Values.telemetry.ports.grpc -}}
{{- $telemetryServiceArgs := dict "ServiceName" "telemetry" "Ports" $telemetryPorts "Release" $.Release "Values" $.Values -}}
{{- include "oneuptime.service" $telemetryServiceArgs }}
---
{{- end }}

View File

@@ -1755,49 +1755,7 @@
"workerConcurrency": {
"type": "integer"
},
"disableTelemetryCollection": {
"type": "boolean"
},
"disableAutoscaler": {
"type": "boolean"
},
"ports": {
"type": "object",
"properties": {
"http": {
"type": "integer"
}
},
"additionalProperties": false
},
"resources": {
"type": [
"object",
"null"
]
},
"nodeSelector": {
"type": "object"
},
"podSecurityContext": {
"type": "object"
},
"containerSecurityContext": {
"type": "object"
},
"enableProfiling": {
"type": "boolean"
}
},
"additionalProperties": false
},
"telemetry": {
"type": "object",
"properties": {
"enabled": {
"type": "boolean"
},
"replicaCount": {
"telemetryConcurrency": {
"type": "integer"
},
"disableTelemetryCollection": {
@@ -1806,9 +1764,6 @@
"disableAutoscaler": {
"type": "boolean"
},
"concurrency": {
"type": "integer"
},
"ports": {
"type": "object",
"properties": {
@@ -1816,7 +1771,8 @@
"type": "integer"
},
"grpc": {
"type": "integer"
"type": "integer",
"default": 4317
}
},
"additionalProperties": false
@@ -1836,30 +1792,6 @@
"containerSecurityContext": {
"type": "object"
},
"keda": {
"type": "object",
"properties": {
"enabled": {
"type": "boolean"
},
"minReplicas": {
"type": "integer"
},
"maxReplicas": {
"type": "integer"
},
"queueSizeThreshold": {
"type": "integer"
},
"pollingInterval": {
"type": "integer"
},
"cooldownPeriod": {
"type": "integer"
}
},
"additionalProperties": false
},
"enableProfiling": {
"type": "boolean"
}

View File

@@ -650,42 +650,18 @@ app:
enabled: true
replicaCount: 1
workerConcurrency: 100
# Max concurrent telemetry ingestion jobs processed by each pod
telemetryConcurrency: 100
disableTelemetryCollection: false
enableProfiling: false
disableAutoscaler: false
ports:
http: 3002
resources:
nodeSelector: {}
podSecurityContext: {}
containerSecurityContext: {}
telemetry:
enabled: true
replicaCount: 1
disableTelemetryCollection: false
enableProfiling: false
disableAutoscaler: false
# Max concurrent telemetry jobs processed by each pod
concurrency: 100
ports:
http: 3403
grpc: 4317
resources:
nodeSelector: {}
podSecurityContext: {}
containerSecurityContext: {}
# KEDA autoscaling configuration based on queue metrics
keda:
enabled: false
minReplicas: 1
maxReplicas: 100
# Scale up when queue size exceeds this threshold
queueSizeThreshold: 100
# Polling interval for metrics (in seconds)
pollingInterval: 30
# Cooldown period after scaling (in seconds)
cooldownPeriod: 300
# AI Agent Configuration
# Deploy this to run an AI Agent within your Kubernetes cluster

View File

@@ -522,7 +522,7 @@ ${PROVISION_SSL_CERTIFICATE_KEY_DIRECTIVE}
location /telemetry {
resolver ${NGINX_RESOLVER} valid=30s;
set $backend_telemetry http://${SERVER_TELEMETRY_HOSTNAME}:${TELEMETRY_PORT};
set $backend_telemetry http://${SERVER_APP_HOSTNAME}:${APP_PORT};
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
@@ -533,7 +533,7 @@ ${PROVISION_SSL_CERTIFICATE_KEY_DIRECTIVE}
location /incoming-request-ingest {
resolver ${NGINX_RESOLVER} valid=30s;
set $backend_telemetry http://${SERVER_TELEMETRY_HOSTNAME}:${TELEMETRY_PORT};
set $backend_telemetry http://${SERVER_APP_HOSTNAME}:${APP_PORT};
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
@@ -545,7 +545,7 @@ ${PROVISION_SSL_CERTIFICATE_KEY_DIRECTIVE}
location /otlp {
resolver ${NGINX_RESOLVER} valid=30s;
set $backend_telemetry http://${SERVER_TELEMETRY_HOSTNAME}:${TELEMETRY_PORT};
set $backend_telemetry http://${SERVER_APP_HOSTNAME}:${APP_PORT};
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
@@ -562,7 +562,7 @@ ${PROVISION_SSL_CERTIFICATE_KEY_DIRECTIVE}
# Pyroscope profiling ingestion endpoint
location /pyroscope {
resolver ${NGINX_RESOLVER} valid=30s;
set $backend_telemetry http://${SERVER_TELEMETRY_HOSTNAME}:${TELEMETRY_PORT};
set $backend_telemetry http://${SERVER_APP_HOSTNAME}:${APP_PORT};
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
@@ -573,7 +573,7 @@ ${PROVISION_SSL_CERTIFICATE_KEY_DIRECTIVE}
location ~ /opentelemetry.proto.collector* {
resolver ${NGINX_RESOLVER} valid=30s;
set $backend_otel_grpc grpc://${SERVER_TELEMETRY_HOSTNAME}:4317;
set $backend_otel_grpc grpc://${SERVER_APP_HOSTNAME}:4317;
grpc_pass $backend_otel_grpc;
}
@@ -602,7 +602,7 @@ ${PROVISION_SSL_CERTIFICATE_KEY_DIRECTIVE}
location /fluentd/logs {
resolver ${NGINX_RESOLVER} valid=30s;
set $backend_telemetry http://${SERVER_TELEMETRY_HOSTNAME}:${TELEMETRY_PORT};
set $backend_telemetry http://${SERVER_APP_HOSTNAME}:${APP_PORT};
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
@@ -619,7 +619,7 @@ ${PROVISION_SSL_CERTIFICATE_KEY_DIRECTIVE}
location /syslog/v1/logs {
resolver ${NGINX_RESOLVER} valid=30s;
set $backend_telemetry http://${SERVER_TELEMETRY_HOSTNAME}:${TELEMETRY_PORT};
set $backend_telemetry http://${SERVER_APP_HOSTNAME}:${APP_PORT};
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
@@ -634,7 +634,7 @@ ${PROVISION_SSL_CERTIFICATE_KEY_DIRECTIVE}
location /probe-ingest {
resolver ${NGINX_RESOLVER} valid=30s;
set $backend_telemetry http://${SERVER_TELEMETRY_HOSTNAME}:${TELEMETRY_PORT};
set $backend_telemetry http://${SERVER_APP_HOSTNAME}:${APP_PORT};
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
@@ -652,7 +652,7 @@ ${PROVISION_SSL_CERTIFICATE_KEY_DIRECTIVE}
# For backward compatibility with probes that are already deployed
location /ingestor {
resolver ${NGINX_RESOLVER} valid=30s;
set $backend_telemetry http://${SERVER_TELEMETRY_HOSTNAME}:${TELEMETRY_PORT};
set $backend_telemetry http://${SERVER_APP_HOSTNAME}:${APP_PORT};
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
@@ -669,7 +669,7 @@ ${PROVISION_SSL_CERTIFICATE_KEY_DIRECTIVE}
location /server-monitor {
resolver ${NGINX_RESOLVER} valid=30s;
set $backend_telemetry http://${SERVER_TELEMETRY_HOSTNAME}:${TELEMETRY_PORT};
set $backend_telemetry http://${SERVER_APP_HOSTNAME}:${APP_PORT};
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
@@ -914,7 +914,7 @@ ${PROVISION_SSL_CERTIFICATE_KEY_DIRECTIVE}
location /heartbeat {
resolver ${NGINX_RESOLVER} valid=30s;
set $backend_telemetry http://${SERVER_TELEMETRY_HOSTNAME}:${TELEMETRY_PORT};
set $backend_telemetry http://${SERVER_APP_HOSTNAME}:${APP_PORT};
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
@@ -933,7 +933,7 @@ ${PROVISION_SSL_CERTIFICATE_KEY_DIRECTIVE}
location /incoming-email {
# Incoming Email Monitor webhook endpoint
resolver ${NGINX_RESOLVER} valid=30s;
set $backend_telemetry http://${SERVER_TELEMETRY_HOSTNAME}:${TELEMETRY_PORT};
set $backend_telemetry http://${SERVER_APP_HOSTNAME}:${APP_PORT};
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;

View File

@@ -1,56 +0,0 @@
.git
node_modules
# See https://help.github.com/ignore-files/ for more about ignoring files.
# dependencies
/node_modules
node_modules
.idea
# testing
/coverage
# production
/build
# misc
.DS_Store
env.js
npm-debug.log*
yarn-debug.log*
yarn-error.log*
yarn.lock
Untitled-1
*.local.sh
*.local.yaml
run
stop
nohup.out*
encrypted-credentials.tar
encrypted-credentials/
_README.md
# Important Add production values to gitignore.
values-saas-production.yaml
kubernetes/values-saas-production.yaml
/private
/tls_cert.pem
/tls_key.pem
/keys
temp_readme.md
tests/coverage
settings.json
GoSDK/tester/

View File

@@ -1 +0,0 @@
*.js text eol=lf

16
Telemetry/.gitignore vendored
View File

@@ -1,16 +0,0 @@
# See https://help.github.com/ignore-files/ for more about ignoring files.
# dependencies
#/backend/node_modules
/kubernetes
/node_modules
.idea
# misc
.DS_Store
npm-debug.log*
yarn-debug.log*
yarn-error.log*
yarn.lock

View File

@@ -1,83 +0,0 @@
#
# OneUptime-Telemetry Dockerfile
#
# Pull base nodejs image.
FROM public.ecr.aws/docker/library/node:23.8
# Use a world-writable npm cache so the container can also run as a non-root user.
RUN mkdir /tmp/npm && chmod 2777 /tmp/npm && chown 1000:1000 /tmp/npm && npm config set cache /tmp/npm --global
# Make npm resilient to transient registry failures.
RUN npm config set fetch-retries 5
RUN npm config set fetch-retry-mintimeout 20000
RUN npm config set fetch-retry-maxtimeout 60000
# Build metadata passed in by CI.
ARG GIT_SHA
ARG APP_VERSION
ARG IS_ENTERPRISE_EDITION=false
ENV GIT_SHA=${GIT_SHA}
ENV APP_VERSION=${APP_VERSION}
ENV IS_ENTERPRISE_EDITION=${IS_ENTERPRISE_EDITION}
ENV PLAYWRIGHT_SKIP_BROWSER_DOWNLOAD=1
ENV NODE_OPTIONS="--use-openssl-ca"
## Add intermediate certs so outbound TLS trusts internal CAs.
COPY ./SslCertificates /usr/local/share/ca-certificates
RUN update-ca-certificates
# IF APP_VERSION is not set, set it to 1.0.0
# NOTE(review): `export` inside a RUN does not persist to later layers, so this
# line has no lasting effect — confirm intent; a default on the ARG would.
RUN if [ -z "$APP_VERSION" ]; then export APP_VERSION=1.0.0; fi
RUN apt-get update
# Install bash, curl and ping utilities (used for debugging / health checks).
RUN apt-get install bash -y && apt-get install curl -y && apt-get install iputils-ping -y
# Install the toolchain needed to build native node addons (node-gyp).
# Fix: the previous ".gyp" argument is not a valid Debian package name; apt-get
# interprets arguments containing regex characters as a POSIX regex, which can
# match and install unintended packages or fail the build.
RUN apt-get update && apt-get install -y python3 make g++
#Use bash shell by default
SHELL ["/bin/bash", "-c"]
# Install net-tools for network debugging inside the container.
RUN apt-get install net-tools -y
RUN mkdir -p /usr/src
WORKDIR /usr/src/Common
COPY ./Common/package*.json /usr/src/Common/
# Stamp the build version into ./Common/package.json before installing.
RUN sed -i "s/\"version\": \".*\"/\"version\": \"$APP_VERSION\"/g" /usr/src/Common/package.json
RUN npm install
COPY ./Common /usr/src/Common
ENV PRODUCTION=true
WORKDIR /usr/src/app
# Install app dependencies first so this layer is cached across source changes.
COPY ./Telemetry/package*.json /usr/src/app/
RUN npm install
# Expose ports.
# - 3403: OneUptime-Telemetry
EXPOSE 3403
{{ if eq .Env.ENVIRONMENT "development" }}
# In development the source is mounted; run with live reload.
CMD [ "npm", "run", "dev" ]
{{ else }}
# Copy app source
COPY ./Telemetry /usr/src/app
# Bundle app source
RUN npm run compile
# Set permission to write logs and cache in case container runs as non root
RUN chown -R 1000:1000 "/tmp/npm" && chmod -R 2777 "/tmp/npm"
# Run the app
CMD [ "npm", "start" ]
{{ end }}

View File

@@ -1,16 +0,0 @@
# Syslog Testing Instructions
To test from a terminal, send a sample RFC 5424 payload:
```bash
curl -X POST \
-H "Content-Type: text/plain" \
-H "x-oneuptime-token: YOUR_TEST_TOKEN" \
-H "x-oneuptime-service-name: local-syslog" \
--data '<134>1 2024-07-10T17:25:43.123Z host app 1234 ID47 [exampleSDID@32473 iut="3" eventSource="App"] An application event log entry' \
https://oneuptime.com/syslog/v1/logs
```
Replace `YOUR_TEST_TOKEN` and `local-syslog` with a valid telemetry key and desired service label. Please also replace oneuptime.com with your host if you're testing this locally.
Inspect the service logs or connected queue to ensure the message is accepted and parsed.

View File

@@ -1,281 +0,0 @@
{
"resourceProfiles": [
{
"resource": {
"attributes": [
{
"key": "service.name",
"value": {
"stringValue": "my-go-service"
}
},
{
"key": "host.name",
"value": {
"stringValue": "prod-server-01"
}
},
{
"key": "process.runtime.name",
"value": {
"stringValue": "go"
}
},
{
"key": "process.runtime.version",
"value": {
"stringValue": "go1.22.0"
}
},
{
"key": "telemetry.sdk.name",
"value": {
"stringValue": "opentelemetry"
}
},
{
"key": "telemetry.sdk.language",
"value": {
"stringValue": "go"
}
},
{
"key": "telemetry.sdk.version",
"value": {
"stringValue": "1.28.0"
}
}
]
},
"scopeProfiles": [
{
"scope": {
"name": "otel-profiling-go",
"version": "0.5.0"
},
"profiles": [
{
"profileId": "qg7PaWLjuqLhWlwvlHRU9A==",
"startTimeUnixNano": "1700000000000000000",
"endTimeUnixNano": "1700000030000000000",
"attributes": [
{
"key": "profiling.data.type",
"value": {
"stringValue": "cpu"
}
}
],
"originalPayloadFormat": "pprofext",
"profile": {
"stringTable": [
"",
"cpu",
"nanoseconds",
"samples",
"count",
"main.handleRequest",
"/app/main.go",
"net/http.(*conn).serve",
"/usr/local/go/src/net/http/server.go",
"runtime.goexit",
"/usr/local/go/src/runtime/asm_amd64.s",
"main.processData",
"/app/processor.go",
"encoding/json.Marshal",
"/usr/local/go/src/encoding/json/encode.go",
"runtime.mallocgc",
"/usr/local/go/src/runtime/malloc.go",
"main.queryDatabase",
"/app/db.go",
"database/sql.(*DB).QueryContext",
"/usr/local/go/src/database/sql/sql.go"
],
"sampleType": [
{
"type": 1,
"unit": 2
},
{
"type": 3,
"unit": 4
}
],
"periodType": {
"type": 1,
"unit": 2
},
"period": 10000000,
"functionTable": [
{
"name": 5,
"filename": 6
},
{
"name": 7,
"filename": 8
},
{
"name": 9,
"filename": 10
},
{
"name": 11,
"filename": 12
},
{
"name": 13,
"filename": 14
},
{
"name": 15,
"filename": 16
},
{
"name": 17,
"filename": 18
},
{
"name": 19,
"filename": 20
}
],
"locationTable": [
{
"line": [
{
"functionIndex": 0,
"line": 42
}
]
},
{
"line": [
{
"functionIndex": 1,
"line": 1960
}
]
},
{
"line": [
{
"functionIndex": 2,
"line": 1700
}
]
},
{
"line": [
{
"functionIndex": 3,
"line": 88
}
]
},
{
"line": [
{
"functionIndex": 4,
"line": 160
}
]
},
{
"line": [
{
"functionIndex": 5,
"line": 905
}
]
},
{
"line": [
{
"functionIndex": 6,
"line": 55
}
]
},
{
"line": [
{
"functionIndex": 7,
"line": 1612
}
]
}
],
"stackTable": [
{
"locationIndices": [0, 1, 2]
},
{
"locationIndices": [3, 4, 0, 1, 2]
},
{
"locationIndices": [5, 3, 0, 1, 2]
},
{
"locationIndices": [6, 7, 0, 1, 2]
}
],
"linkTable": [
{
"traceId": "qg7PaWLjuqLhWlwvlHRU9A==",
"spanId": "r+N4WZXXfP4="
}
],
"attributeTable": [
{
"key": "profile.frame.type",
"value": {
"stringValue": "go"
}
},
{
"key": "thread.name",
"value": {
"stringValue": "main"
}
}
],
"sample": [
{
"stackIndex": 0,
"value": [50000000, 5],
"timestampsUnixNano": ["1700000005000000000"],
"linkIndex": 0,
"attributeIndices": [0, 1]
},
{
"stackIndex": 1,
"value": [120000000, 12],
"timestampsUnixNano": ["1700000010000000000"],
"linkIndex": 0,
"attributeIndices": [0]
},
{
"stackIndex": 2,
"value": [30000000, 3],
"timestampsUnixNano": ["1700000015000000000"],
"linkIndex": 0,
"attributeIndices": [0]
},
{
"stackIndex": 3,
"value": [80000000, 8],
"timestampsUnixNano": ["1700000020000000000"],
"linkIndex": 0,
"attributeIndices": [0]
}
]
}
}
]
}
],
"schemaUrl": "https://opentelemetry.io/schemas/1.21.0"
}
]
}

View File

@@ -1,60 +0,0 @@
{
"resourceLogs":[
{
"resource":{
"attributes":[
{
"key":"service.name",
"value":{
"stringValue":"otel-dotnet"
}
},
{
"key":"service.instance.id",
"value":{
"stringValue":"933c049b-1f18-4dbf-bd4f-acd9cbc3a03e"
}
},
{
"key":"telemetry.sdk.name",
"value":{
"stringValue":"opentelemetry"
}
},
{
"key":"telemetry.sdk.language",
"value":{
"stringValue":"dotnet"
}
},
{
"key":"telemetry.sdk.version",
"value":{
"stringValue":"1.6.0"
}
}
]
},
"scopeLogs":[
{
"scope":{
},
"logRecords":[
{
"timeUnixNano":"1698069643739368000",
"severityNumber":"SEVERITY_NUMBER_INFO",
"severityText":"Information",
"body":{
"stringValue":"Application is shutting down..."
},
"traceId":"",
"spanId":"",
"observedTimeUnixNano":"1698069643739368000"
}
]
}
]
}
]
}

View File

@@ -1,294 +0,0 @@
{
"resourceMetrics":[
{
"resource":{
"attributes":[
{
"key":"host.name",
"value":{
"stringValue":"c16e92aabd73"
}
},
{
"key":"process.command_args",
"value":{
"arrayValue":{
"values":[
{
"stringValue":"/app/main"
}
]
}
}
},
{
"key":"process.executable.name",
"value":{
"stringValue":"main"
}
},
{
"key":"process.executable.path",
"value":{
"stringValue":"/app/main"
}
},
{
"key":"process.owner",
"value":{
"stringValue":"root"
}
},
{
"key":"process.pid",
"value":{
"intValue":"1"
}
},
{
"key":"process.runtime.description",
"value":{
"stringValue":"go version go1.21.2 linux/arm64"
}
},
{
"key":"process.runtime.name",
"value":{
"stringValue":"go"
}
},
{
"key":"process.runtime.version",
"value":{
"stringValue":"go1.21.2"
}
},
{
"key":"service.name",
"value":{
"stringValue":"demo-client"
}
},
{
"key":"telemetry.sdk.language",
"value":{
"stringValue":"go"
}
},
{
"key":"telemetry.sdk.name",
"value":{
"stringValue":"opentelemetry"
}
},
{
"key":"telemetry.sdk.version",
"value":{
"stringValue":"1.19.0"
}
}
]
},
"scopeMetrics":[
{
"scope":{
"name":"demo-client-meter"
},
"metrics":[
{
"name":"demo_client/request_latency",
"description":"The latency of requests processed",
"histogram":{
"dataPoints":[
{
"startTimeUnixNano":"1698055858300756753",
"timeUnixNano":"1698073168340861251",
"count":"12262",
"sum":5009057.084501003,
"bucketCounts":[
"0",
"268",
"700",
"1805",
"852",
"770",
"645",
"1041",
"1731",
"1761",
"1451",
"1238",
"0",
"0",
"0",
"0"
],
"explicitBounds":[
0,
5,
10,
25,
50,
75,
100,
250,
500,
750,
1000,
2500,
5000,
7500,
10000
],
"attributes":[
{
"key":"client",
"value":{
"stringValue":"cli"
}
},
{
"key":"method",
"value":{
"stringValue":"repl"
}
}
],
"min":0.827,
"max":2003.162543
}
],
"aggregationTemporality":"AGGREGATION_TEMPORALITY_CUMULATIVE"
}
},
{
"name":"demo_client/request_counts",
"description":"The number of requests processed",
"sum":{
"dataPoints":[
{
"startTimeUnixNano":"1698055858300764253",
"timeUnixNano":"1698073168340863543",
"asInt":"12262",
"attributes":[
{
"key":"client",
"value":{
"stringValue":"cli"
}
},
{
"key":"method",
"value":{
"stringValue":"repl"
}
}
]
}
],
"aggregationTemporality":"AGGREGATION_TEMPORALITY_CUMULATIVE",
"isMonotonic":true
}
},
{
"name":"demo_client/line_lengths",
"description":"The lengths of the various lines in",
"histogram":{
"dataPoints":[
{
"startTimeUnixNano":"1698055858300770170",
"timeUnixNano":"1698073168340864918",
"count":"36845",
"sum":18342968,
"bucketCounts":[
"27",
"174",
"185",
"586",
"928",
"967",
"904",
"5531",
"9221",
"9200",
"9122",
"0",
"0",
"0",
"0",
"0"
],
"explicitBounds":[
0,
5,
10,
25,
50,
75,
100,
250,
500,
750,
1000,
2500,
5000,
7500,
10000
],
"attributes":[
{
"key":"client",
"value":{
"stringValue":"cli"
}
},
{
"key":"method",
"value":{
"stringValue":"repl"
}
}
],
"min":0,
"max":998
}
],
"aggregationTemporality":"AGGREGATION_TEMPORALITY_CUMULATIVE"
}
},
{
"name":"demo_client/line_counts",
"description":"The counts of the lines in",
"sum":{
"dataPoints":[
{
"startTimeUnixNano":"1698055858300772836",
"timeUnixNano":"1698073168340865418",
"asInt":"36845",
"attributes":[
{
"key":"client",
"value":{
"stringValue":"cli"
}
},
{
"key":"method",
"value":{
"stringValue":"repl"
}
}
]
}
],
"aggregationTemporality":"AGGREGATION_TEMPORALITY_CUMULATIVE",
"isMonotonic":true
}
}
]
}
],
"schemaUrl":"https://opentelemetry.io/schemas/1.21.0"
}
]
}

View File

@@ -1,42 +0,0 @@
# OpenTelemetry Profiles: Remaining Roadmap
All core phases (ingestion, storage, query API, frontend UI, alerting, docs) are implemented. This document tracks remaining future work items.
---
## Performance Optimization
- **Materialized Views**: Pre-aggregate top functions per service per hour for faster queries
- **Server-Side Sampling**: Downsampling for high-volume services to control storage costs
- **Query Caching**: Cache aggregated flamegraph results to reduce ClickHouse load
## Symbolization Pipeline
Symbolization is NOT yet standardized in the OTel Profiles spec. The eBPF agent handles on-target symbolization for Go, and many runtimes provide symbol info at collection time. A dedicated symbolization pipeline (symbol uploads, deferred re-symbolization, object storage) can be added once the spec stabilizes.
## Conformance Validation
Integrate OTel `profcheck` tool into CI once core profiling features stabilize.
---
## Key Risks & Mitigations
| Risk | Impact | Mitigation |
|------|--------|------------|
| OTLP Profiles is still Alpha — proto schema may change | Breaking changes to ingestion | Pin to specific OTLP proto version (v1.10.0+), add version detection |
| `v1development` package path will change to `v1` at GA | Proto import path migration | Abstract proto version behind internal types; plan migration script for when GA lands |
| High storage volume from continuous profiling | ClickHouse disk/cost growth | Server-side sampling, aggressive TTL defaults (15 days), ZSTD(3) compression |
| Flamegraph rendering performance with large profiles | Slow UI | Limit to top 10K stacktraces, lazy-load deep frames, pre-aggregate via materialized views |
| Symbolization is not standardized | Unsymbolized frames in flamegraphs | Store build IDs for deferred symbolization; accept eBPF agent's on-target symbolization as baseline |
| Semantic conventions are minimal (only `profile.frame.type`) | Schema may need changes as conventions mature | Keep attribute storage flexible (JSON columns); avoid hardcoding specific attribute names |
---
## References
- [OTel Profiles Alpha Blog Post](https://opentelemetry.io/blog/2026/profiles-alpha/)
- [OTLP Profiles Proto](https://github.com/open-telemetry/opentelemetry-proto/blob/main/opentelemetry/proto/profiles/v1development/profiles.proto)
- [OTel eBPF Profiling Agent](https://github.com/open-telemetry/opentelemetry-ebpf-profiler)
- [pprof Format](https://github.com/google/pprof)
- [OTel Semantic Conventions for Profiles](https://opentelemetry.io/docs/specs/semconv/general/profiles/)

View File

@@ -1,310 +0,0 @@
{
"resourceSpans": [
{
"resource": {
"attributes": [
{
"key": "host.name",
"value": {
"stringValue": "c16e92aabd73"
}
},
{
"key": "process.command_args",
"value": {
"arrayValue": {
"values": [
{
"stringValue": "/app/main"
}
]
}
}
},
{
"key": "process.executable.name",
"value": {
"stringValue": "main"
}
},
{
"key": "process.executable.path",
"value": {
"stringValue": "/app/main"
}
},
{
"key": "process.owner",
"value": {
"stringValue": "root"
}
},
{
"key": "process.pid",
"value": {
"intValue": "1"
}
},
{
"key": "process.runtime.description",
"value": {
"stringValue": "go version go1.21.2 linux/arm64"
}
},
{
"key": "process.runtime.name",
"value": {
"stringValue": "go"
}
},
{
"key": "process.runtime.version",
"value": {
"stringValue": "go1.21.2"
}
},
{
"key": "service.name",
"value": {
"stringValue": "demo-client"
}
},
{
"key": "telemetry.sdk.language",
"value": {
"stringValue": "go"
}
},
{
"key": "telemetry.sdk.name",
"value": {
"stringValue": "opentelemetry"
}
},
{
"key": "telemetry.sdk.version",
"value": {
"stringValue": "1.19.0"
}
}
]
},
"scopeSpans": [
{
"scope": {
"name": "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp",
"version": "0.44.0"
},
"spans": [
{
"traceId": "xALujWnjuqLhWlwvlHRU9A==",
"spanId": "r+N4WZXXfP4=",
"parentSpanId": "6fMLbQYm3c0=",
"name": "HTTP GET",
"kind": "SPAN_KIND_CLIENT",
"startTimeUnixNano": "1697797651330953503",
"endTimeUnixNano": "1697797651335485545",
"attributes": [
{
"key": "http.method",
"value": {
"stringValue": "GET"
}
},
{
"key": "http.flavor",
"value": {
"stringValue": "1.1"
}
},
{
"key": "http.url",
"value": {
"stringValue": "http://demo-server:7080/hello"
}
},
{
"key": "net.peer.name",
"value": {
"stringValue": "demo-server"
}
},
{
"key": "net.peer.port",
"value": {
"intValue": "7080"
}
},
{
"key": "http.status_code",
"value": {
"intValue": "200"
}
},
{
"key": "http.response_content_length",
"value": {
"intValue": "11"
}
}
],
"status": {
}
},
{
"traceId": "+5d347OFkoNrqTJcdQ0/pQ==",
"spanId": "aYG/Gp8vKeQ=",
"parentSpanId": "Pgt2JbxFK7g=",
"name": "HTTP GET",
"kind": "SPAN_KIND_CLIENT",
"startTimeUnixNano": "1697797652336932795",
"endTimeUnixNano": "1697797653208134504",
"attributes": [
{
"key": "http.method",
"value": {
"stringValue": "GET"
}
},
{
"key": "http.flavor",
"value": {
"stringValue": "1.1"
}
},
{
"key": "http.url",
"value": {
"stringValue": "http://demo-server:7080/hello"
}
},
{
"key": "net.peer.name",
"value": {
"stringValue": "demo-server"
}
},
{
"key": "net.peer.port",
"value": {
"intValue": "7080"
}
},
{
"key": "http.status_code",
"value": {
"intValue": "200"
}
},
{
"key": "http.response_content_length",
"value": {
"intValue": "11"
}
}
],
"status": {
}
},
{
"traceId": "W+rycekdacOYa6z/AZQAqQ==",
"spanId": "rqsqCMzBOeA=",
"parentSpanId": "XB3qqSVeF4w=",
"name": "HTTP GET",
"kind": "SPAN_KIND_CLIENT",
"startTimeUnixNano": "1697797654211901088",
"endTimeUnixNano": "1697797654913467921",
"attributes": [
{
"key": "http.method",
"value": {
"stringValue": "GET"
}
},
{
"key": "http.flavor",
"value": {
"stringValue": "1.1"
}
},
{
"key": "http.url",
"value": {
"stringValue": "http://demo-server:7080/hello"
}
},
{
"key": "net.peer.name",
"value": {
"stringValue": "demo-server"
}
},
{
"key": "net.peer.port",
"value": {
"intValue": "7080"
}
},
{
"key": "http.status_code",
"value": {
"intValue": "200"
}
},
{
"key": "http.response_content_length",
"value": {
"intValue": "11"
}
}
],
"status": {
}
}
]
},
{
"scope": {
"name": "demo-client-tracer"
},
"spans": [
{
"traceId": "xALujWnjuqLhWlwvlHRU9A==",
"spanId": "6fMLbQYm3c0=",
"parentSpanId": "",
"name": "ExecuteRequest",
"kind": "SPAN_KIND_INTERNAL",
"startTimeUnixNano": "1697797651330917795",
"endTimeUnixNano": "1697797651335519253",
"status": {
}
},
{
"traceId": "+5d347OFkoNrqTJcdQ0/pQ==",
"spanId": "Pgt2JbxFK7g=",
"parentSpanId": "",
"name": "ExecuteRequest",
"kind": "SPAN_KIND_INTERNAL",
"startTimeUnixNano": "1697797652336904795",
"endTimeUnixNano": "1697797653208214212",
"status": {
}
},
{
"traceId": "W+rycekdacOYa6z/AZQAqQ==",
"spanId": "XB3qqSVeF4w=",
"parentSpanId": "",
"name": "ExecuteRequest",
"kind": "SPAN_KIND_INTERNAL",
"startTimeUnixNano": "1697797654211873755",
"endTimeUnixNano": "1697797654913552214",
"status": {
}
}
]
}
],
"schemaUrl": "https://opentelemetry.io/schemas/1.21.0"
}
]
}

View File

@@ -1,133 +0,0 @@
import OTelIngestAPI from "./API/OTelIngest";
import MetricsAPI from "./API/Metrics";
import SyslogAPI from "./API/Syslog";
import FluentAPI from "./API/Fluent";
import PyroscopeAPI from "./API/Pyroscope";
// ProbeIngest routes
import ProbeIngestRegisterAPI from "./API/ProbeIngest/Register";
import ProbeIngestMonitorAPI from "./API/ProbeIngest/Monitor";
import ProbeIngestAPI from "./API/ProbeIngest/Probe";
import IncomingEmailAPI from "./API/ProbeIngest/IncomingEmail";
// ServerMonitorIngest routes
import ServerMonitorAPI from "./API/ServerMonitorIngest/ServerMonitor";
// IncomingRequestIngest routes
import IncomingRequestAPI from "./API/IncomingRequestIngest/IncomingRequest";
import { PromiseVoidFunction } from "Common/Types/FunctionTypes";
import { ClickhouseAppInstance } from "Common/Server/Infrastructure/ClickhouseDatabase";
import PostgresAppInstance from "Common/Server/Infrastructure/PostgresDatabase";
import Redis from "Common/Server/Infrastructure/Redis";
import InfrastructureStatus from "Common/Server/Infrastructure/Status";
import Express, { ExpressApplication } from "Common/Server/Utils/Express";
import logger from "Common/Server/Utils/Logger";
import Realtime from "Common/Server/Utils/Realtime";
import App from "Common/Server/Utils/StartServer";
import Telemetry from "Common/Server/Utils/Telemetry";
import Profiling from "Common/Server/Utils/Profiling";
import "./Jobs/TelemetryIngest/ProcessTelemetry";
import { TELEMETRY_CONCURRENCY } from "./Config";
import type { StatusAPIOptions } from "Common/Server/API/StatusAPI";
import { startGrpcServer } from "./GrpcServer";
import "ejs";
// Shared Express app; every ingest API below is mounted on this instance.
const app: ExpressApplication = Express.getExpressApp();
const APP_NAME: string = "telemetry";
// Telemetry APIs are reachable under "/telemetry" and at the root path.
const TELEMETRY_PREFIXES: Array<string> = [`/${APP_NAME}`, "/"];
// Existing telemetry routes
app.use(TELEMETRY_PREFIXES, OTelIngestAPI);
app.use(TELEMETRY_PREFIXES, MetricsAPI);
app.use(TELEMETRY_PREFIXES, SyslogAPI);
app.use(TELEMETRY_PREFIXES, FluentAPI);
app.use(TELEMETRY_PREFIXES, PyroscopeAPI);
/*
 * ProbeIngest routes under ["/probe-ingest", "/ingestor", "/"]
 * "/ingestor" is used for backward compatibility because probes are already deployed with this path in client environments.
 */
const PROBE_INGEST_PREFIXES: Array<string> = [
  "/probe-ingest",
  "/ingestor",
  "/",
];
app.use(PROBE_INGEST_PREFIXES, ProbeIngestRegisterAPI);
app.use(PROBE_INGEST_PREFIXES, ProbeIngestMonitorAPI);
app.use(PROBE_INGEST_PREFIXES, ProbeIngestAPI);
// Incoming email is intentionally NOT exposed under the legacy "/ingestor" path.
app.use(["/probe-ingest", "/"], IncomingEmailAPI);
// ServerMonitorIngest routes under ["/server-monitor-ingest", "/"]
const SERVER_MONITOR_PREFIXES: Array<string> = ["/server-monitor-ingest", "/"];
app.use(SERVER_MONITOR_PREFIXES, ServerMonitorAPI);
// IncomingRequestIngest routes under ["/incoming-request-ingest", "/"]
const INCOMING_REQUEST_PREFIXES: Array<string> = [
  "/incoming-request-ingest",
  "/",
];
app.use(INCOMING_REQUEST_PREFIXES, IncomingRequestAPI);
// Boots the telemetry service: verifies infrastructure status, initializes
// telemetry/profiling, starts the HTTP app, connects Postgres, Redis and
// ClickHouse, initializes Realtime, starts the gRPC OTLP listener, and
// registers default routes. Logs and rethrows on any failure so the
// top-level bootstrap can terminate the process.
const init: PromiseVoidFunction = async (): Promise<void> => {
  try {
    // Shared probe: all three datastores must be reachable (with retries).
    const statusCheck: PromiseVoidFunction = async (): Promise<void> => {
      return await InfrastructureStatus.checkStatusWithRetry({
        checkClickhouseStatus: true,
        checkPostgresStatus: true,
        checkRedisStatus: true,
        retryCount: 3,
      });
    };
    // Initialize telemetry
    Telemetry.init({
      serviceName: APP_NAME,
    });
    // Initialize profiling (opt-in via ENABLE_PROFILING env var)
    Profiling.init({
      serviceName: APP_NAME,
    });
    logger.info(
      `Telemetry Service - Queue concurrency: ${TELEMETRY_CONCURRENCY}`,
    );
    // init the app — the same check backs both liveness and readiness.
    const statusOptions: StatusAPIOptions = {
      liveCheck: statusCheck,
      readyCheck: statusCheck,
    };
    await App.init({
      appName: APP_NAME,
      statusOptions: statusOptions,
    });
    // connect to the database.
    await PostgresAppInstance.connect();
    // connect redis
    await Redis.connect();
    // Connect ClickHouse using its configured datasource options.
    await ClickhouseAppInstance.connect(
      ClickhouseAppInstance.getDatasourceOptions(),
    );
    await Realtime.init();
    // Start gRPC OTLP server on port 4317
    startGrpcServer();
    // add default routes
    await App.addDefaultRoutes();
  } catch (err) {
    // Log and rethrow so the caller decides process termination.
    logger.error("App Init Failed:");
    logger.error(err);
    throw err;
  }
};
// Bootstrap: run init and exit non-zero if startup fails, so the container
// orchestrator can restart the service.
init().catch((err: Error) => {
  logger.error(err);
  logger.error("Exiting node process");
  process.exit(1);
});

View File

@@ -1,191 +0,0 @@
#!/bin/bash
# Integration test script for Fluent Bit / Fluentd log ingestion
#
# Tests the /fluentd/v1/logs endpoint with realistic payloads
# to verify that structured attributes are preserved.
#
# Usage:
#   ./test-fluentd-ingest.sh <ONEUPTIME_URL> <SERVICE_TOKEN> [SERVICE_NAME]
#
# Example:
#   ./test-fluentd-ingest.sh http://localhost:3400 your-token-here my-k8s-service
#
# Exit status: 0 when every test passes, 1 when any test fails — previously
# the script always exited 0 (the `[ ... ] && echo PASS || echo FAIL` pattern
# swallows failures under `set -e`), so CI could not detect regressions.
set -euo pipefail
URL="${1:?Usage: $0 <ONEUPTIME_URL> <SERVICE_TOKEN> [SERVICE_NAME]}"
TOKEN="${2:?Usage: $0 <ONEUPTIME_URL> <SERVICE_TOKEN> [SERVICE_NAME]}"
SERVICE_NAME="${3:-fluent-test-service}"
ENDPOINT="${URL}/fluentd/v1/logs"

# Number of failed tests; drives the final exit status.
FAILURES=0

# check_status <http_code>
# Prints the status, reports PASS/FAIL against the expected 200, and
# records the failure.
check_status() {
  local code="$1"
  echo "HTTP Status: ${code}"
  if [ "${code}" = "200" ]; then
    echo "PASS"
  else
    echo "FAIL (expected 200)"
    FAILURES=$((FAILURES + 1))
  fi
  echo ""
}

echo "=== Fluent Bit/Fluentd Log Ingestion Integration Tests ==="
echo "Endpoint: ${ENDPOINT}"
echo "Service: ${SERVICE_NAME}"
echo ""

# Test 1: Single structured Kubernetes log entry
echo "--- Test 1: Single Kubernetes log entry with metadata ---"
HTTP_CODE=$(curl -s -o /dev/null -w "%{http_code}" \
  -X POST "${ENDPOINT}" \
  -H "Content-Type: application/json" \
  -H "x-oneuptime-token: ${TOKEN}" \
  -H "x-oneuptime-service-name: ${SERVICE_NAME}" \
  -d '{
    "message": "Connection to database established successfully",
    "level": "info",
    "stream": "stdout",
    "time": "2024-01-15T10:30:00.123456789Z",
    "kubernetes": {
      "namespace_name": "production",
      "pod_name": "api-server-7b9f4c8d5-xk2m9",
      "container_name": "api-server",
      "pod_id": "a1b2c3d4-e5f6-7890-abcd-ef1234567890",
      "labels": {
        "app": "api-server",
        "version": "v2.1.0",
        "team": "platform"
      },
      "host": "node-pool-1-abc"
    }
  }')
check_status "${HTTP_CODE}"

# Test 2: Batch of log entries (array format from Fluentd)
echo "--- Test 2: Batch of structured log entries ---"
HTTP_CODE=$(curl -s -o /dev/null -w "%{http_code}" \
  -X POST "${ENDPOINT}" \
  -H "Content-Type: application/json" \
  -H "x-oneuptime-token: ${TOKEN}" \
  -H "x-oneuptime-service-name: ${SERVICE_NAME}" \
  -d '[
    {
      "message": "Request received: GET /api/health",
      "level": "debug",
      "stream": "stdout",
      "kubernetes": {
        "namespace_name": "production",
        "pod_name": "web-abc123",
        "container_name": "nginx"
      }
    },
    {
      "message": "Upstream timeout after 30s",
      "level": "error",
      "stream": "stderr",
      "kubernetes": {
        "namespace_name": "production",
        "pod_name": "web-abc123",
        "container_name": "nginx"
      }
    },
    {
      "message": "Retrying connection to upstream",
      "level": "warning",
      "stream": "stderr",
      "kubernetes": {
        "namespace_name": "production",
        "pod_name": "web-abc123",
        "container_name": "nginx"
      }
    }
  ]')
check_status "${HTTP_CODE}"

# Test 3: Fluentd json-wrapped format
echo "--- Test 3: Fluentd json-wrapped format ---"
HTTP_CODE=$(curl -s -o /dev/null -w "%{http_code}" \
  -X POST "${ENDPOINT}" \
  -H "Content-Type: application/json" \
  -H "x-oneuptime-token: ${TOKEN}" \
  -H "x-oneuptime-service-name: ${SERVICE_NAME}" \
  -d '{
    "json": {
      "log": "2024-01-15 ERROR: Failed to connect to redis:6379",
      "stream": "stderr",
      "level": "error",
      "kubernetes": {
        "namespace_name": "default",
        "pod_name": "cache-worker-xyz",
        "container_name": "worker",
        "labels": {
          "app.kubernetes.io/name": "cache-worker",
          "app.kubernetes.io/component": "background"
        }
      }
    }
  }')
check_status "${HTTP_CODE}"

# Test 4: Log with trace context
echo "--- Test 4: Log with trace/span IDs ---"
HTTP_CODE=$(curl -s -o /dev/null -w "%{http_code}" \
  -X POST "${ENDPOINT}" \
  -H "Content-Type: application/json" \
  -H "x-oneuptime-token: ${TOKEN}" \
  -H "x-oneuptime-service-name: ${SERVICE_NAME}" \
  -d '{
    "message": "Processing order #12345",
    "level": "info",
    "trace_id": "4bf92f3577b34da6a3ce929d0e0e4736",
    "span_id": "00f067aa0ba902b7",
    "kubernetes": {
      "namespace_name": "production",
      "pod_name": "order-service-abc"
    },
    "order_id": "12345",
    "customer_id": "cust-789"
  }')
check_status "${HTTP_CODE}"

# Test 5: Plain string (backward compatibility)
echo "--- Test 5: Plain string body (backward compatibility) ---"
HTTP_CODE=$(curl -s -o /dev/null -w "%{http_code}" \
  -X POST "${ENDPOINT}" \
  -H "Content-Type: application/json" \
  -H "x-oneuptime-token: ${TOKEN}" \
  -H "x-oneuptime-service-name: ${SERVICE_NAME}" \
  -d '"A simple plain-text log message"')
check_status "${HTTP_CODE}"

# Test 6: Docker/container log format (log field instead of message)
echo "--- Test 6: Docker container log format (using 'log' field) ---"
HTTP_CODE=$(curl -s -o /dev/null -w "%{http_code}" \
  -X POST "${ENDPOINT}" \
  -H "Content-Type: application/json" \
  -H "x-oneuptime-token: ${TOKEN}" \
  -H "x-oneuptime-service-name: ${SERVICE_NAME}" \
  -d '{
    "log": "{\"ts\":\"2024-01-15T10:30:00Z\",\"msg\":\"Server started on port 8080\"}\n",
    "stream": "stdout",
    "time": "2024-01-15T10:30:00.000000001Z",
    "kubernetes": {
      "namespace_name": "staging",
      "pod_name": "api-v2-deployment-85d97fb8c7-4xnpq",
      "container_name": "api",
      "container_image": "registry.example.com/api:v2.0.1"
    }
  }')
check_status "${HTTP_CODE}"

echo "=== All integration tests completed ==="
echo ""
echo "To verify attributes were stored, query the logs in OneUptime UI"
echo "and check for attributes like:"
echo "  - fluentd.kubernetes.namespace_name"
echo "  - fluentd.kubernetes.pod_name"
echo "  - fluentd.kubernetes.container_name"
echo "  - fluentd.kubernetes.labels.*"
echo "  - fluentd.stream"
echo "  - fluentd.time"

# Fail the script (and CI) if any test did not get the expected response.
if [ "${FAILURES}" -gt 0 ]; then
  echo ""
  echo "${FAILURES} test(s) failed."
  exit 1
fi
View File

@@ -1,554 +0,0 @@
import FluentLogsIngestService from "../../Services/FluentLogsIngestService";
import LogSeverity from "Common/Types/Log/LogSeverity";
import { JSONObject } from "Common/Types/JSON";
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const service: any = FluentLogsIngestService as any;
describe("FluentLogsIngestService", () => {
describe("normalizeLogEntries", () => {
test("preserves structured JSON object as-is", () => {
const payload: JSONObject = {
message: "Connection refused",
level: "error",
stream: "stderr",
kubernetes: {
namespace_name: "default",
pod_name: "my-app-xyz",
},
};
const entries: Array<JSONObject> = service["normalizeLogEntries"](
payload,
) as Array<JSONObject>;
expect(entries).toHaveLength(1);
expect(entries[0]).toEqual(payload);
});
test("preserves array of structured objects", () => {
const payload: Array<JSONObject> = [
{ message: "log 1", stream: "stdout" },
{ message: "log 2", stream: "stderr" },
];
const entries: Array<JSONObject> = service["normalizeLogEntries"](
payload,
) as Array<JSONObject>;
expect(entries).toHaveLength(2);
expect(entries[0]!["message"]).toBe("log 1");
expect(entries[0]!["stream"]).toBe("stdout");
expect(entries[1]!["message"]).toBe("log 2");
expect(entries[1]!["stream"]).toBe("stderr");
});
test("unwraps 'json' container field", () => {
const payload: JSONObject = {
json: {
message: "inner log",
kubernetes: { pod_name: "test-pod" },
},
};
const entries: Array<JSONObject> = service["normalizeLogEntries"](
payload,
) as Array<JSONObject>;
expect(entries).toHaveLength(1);
expect(entries[0]!["message"]).toBe("inner log");
expect((entries[0]!["kubernetes"] as JSONObject)["pod_name"]).toBe(
"test-pod",
);
});
test("unwraps 'entries' container field", () => {
const payload: JSONObject = {
entries: [
{ message: "entry 1", host: "node-1" },
{ message: "entry 2", host: "node-2" },
],
};
const entries: Array<JSONObject> = service["normalizeLogEntries"](
payload,
) as Array<JSONObject>;
expect(entries).toHaveLength(2);
expect(entries[0]!["host"]).toBe("node-1");
expect(entries[1]!["host"]).toBe("node-2");
});
test("wraps plain string in JSONObject with message field", () => {
const entries: Array<JSONObject> = service["normalizeLogEntries"](
"simple log line",
) as Array<JSONObject>;
expect(entries).toHaveLength(1);
expect(entries[0]!["message"]).toBe("simple log line");
});
test("splits multiline string into separate entries", () => {
const entries: Array<JSONObject> = service["normalizeLogEntries"](
"line one\nline two\nline three",
) as Array<JSONObject>;
expect(entries).toHaveLength(3);
expect(entries[0]!["message"]).toBe("line one");
expect(entries[1]!["message"]).toBe("line two");
expect(entries[2]!["message"]).toBe("line three");
});
test("handles null and undefined", () => {
expect(service["normalizeLogEntries"](null)).toEqual([]);
expect(service["normalizeLogEntries"](undefined)).toEqual([]);
});
test("handles empty string", () => {
expect(service["normalizeLogEntries"]("")).toEqual([]);
expect(service["normalizeLogEntries"](" ")).toEqual([]);
});
});
describe("extractBodyFromEntry", () => {
test("extracts from 'message' field", () => {
const entry: JSONObject = {
message: "the log body",
stream: "stdout",
};
const body: string = service["extractBodyFromEntry"](entry) as string;
expect(body).toBe("the log body");
});
test("extracts from 'log' field", () => {
const entry: JSONObject = {
log: "container output line",
stream: "stderr",
};
const body: string = service["extractBodyFromEntry"](entry) as string;
expect(body).toBe("container output line");
});
test("extracts from 'msg' field", () => {
const entry: JSONObject = { msg: "short msg field" };
const body: string = service["extractBodyFromEntry"](entry) as string;
expect(body).toBe("short msg field");
});
test("prefers 'message' over 'log'", () => {
const entry: JSONObject = {
message: "from message",
log: "from log",
};
const body: string = service["extractBodyFromEntry"](entry) as string;
expect(body).toBe("from message");
});
test("stringifies entire entry when no body field found", () => {
const entry: JSONObject = {
stream: "stdout",
kubernetes: { pod_name: "test" },
};
const body: string = service["extractBodyFromEntry"](entry) as string;
const parsed: JSONObject = JSON.parse(body) as JSONObject;
expect(parsed["stream"]).toBe("stdout");
});
test("stringifies non-string body field values", () => {
const entry: JSONObject = {
message: { nested: "object" },
};
const body: string = service["extractBodyFromEntry"](entry) as string;
expect(body).toBe('{"nested":"object"}');
});
});
describe("extractSeverityFromEntry", () => {
test("maps 'error' level", () => {
const result: { number: number; text: LogSeverity } = service[
"extractSeverityFromEntry"
]({ level: "error" }) as { number: number; text: LogSeverity };
expect(result.number).toBe(17);
expect(result.text).toBe(LogSeverity.Error);
});
test("maps 'info' level", () => {
const result: { number: number; text: LogSeverity } = service[
"extractSeverityFromEntry"
]({ level: "info" }) as { number: number; text: LogSeverity };
expect(result.number).toBe(9);
expect(result.text).toBe(LogSeverity.Information);
});
test("maps 'warn' level", () => {
const result: { number: number; text: LogSeverity } = service[
"extractSeverityFromEntry"
]({ level: "warn" }) as { number: number; text: LogSeverity };
expect(result.number).toBe(13);
expect(result.text).toBe(LogSeverity.Warning);
});
test("maps 'debug' level", () => {
const result: { number: number; text: LogSeverity } = service[
"extractSeverityFromEntry"
]({ level: "debug" }) as { number: number; text: LogSeverity };
expect(result.number).toBe(5);
expect(result.text).toBe(LogSeverity.Debug);
});
test("maps 'fatal' level", () => {
const result: { number: number; text: LogSeverity } = service[
"extractSeverityFromEntry"
]({ level: "fatal" }) as { number: number; text: LogSeverity };
expect(result.number).toBe(23);
expect(result.text).toBe(LogSeverity.Fatal);
});
test("reads from 'severity' field", () => {
const result: { number: number; text: LogSeverity } = service[
"extractSeverityFromEntry"
]({ severity: "warning" }) as { number: number; text: LogSeverity };
expect(result.number).toBe(13);
expect(result.text).toBe(LogSeverity.Warning);
});
test("is case-insensitive", () => {
const result: { number: number; text: LogSeverity } = service[
"extractSeverityFromEntry"
]({ level: "ERROR" }) as { number: number; text: LogSeverity };
expect(result.number).toBe(17);
expect(result.text).toBe(LogSeverity.Error);
});
test("returns Unspecified for missing severity", () => {
const result: { number: number; text: LogSeverity } = service[
"extractSeverityFromEntry"
]({ message: "no severity" }) as { number: number; text: LogSeverity };
expect(result.number).toBe(0);
expect(result.text).toBe(LogSeverity.Unspecified);
});
test("returns Unspecified for unknown severity value", () => {
const result: { number: number; text: LogSeverity } = service[
"extractSeverityFromEntry"
]({ level: "verbose" }) as { number: number; text: LogSeverity };
expect(result.number).toBe(0);
expect(result.text).toBe(LogSeverity.Unspecified);
});
});
describe("extractStringField", () => {
test("extracts string value from first matching field", () => {
const result: string | undefined = service["extractStringField"](
{ trace_id: "abc123" },
["trace_id", "traceId"],
) as string | undefined;
expect(result).toBe("abc123");
});
test("tries fields in order", () => {
const result: string | undefined = service["extractStringField"](
{ traceId: "from-camel" },
["trace_id", "traceId"],
) as string | undefined;
expect(result).toBe("from-camel");
});
test("converts number to string", () => {
const result: string | undefined = service["extractStringField"](
{ priority: 42 },
["priority"],
) as string | undefined;
expect(result).toBe("42");
});
test("returns undefined when no fields match", () => {
const result: string | undefined = service["extractStringField"](
{ other: "value" },
["trace_id", "traceId"],
) as string | undefined;
expect(result).toBeUndefined();
});
test("skips empty strings", () => {
const result: string | undefined = service["extractStringField"](
{ trace_id: "", traceId: "fallback" },
["trace_id", "traceId"],
) as string | undefined;
expect(result).toBe("fallback");
});
});
describe("buildFluentAttributes", () => {
  // Runs the private attribute builder against a single Fluentd log entry.
  const toAttributes = (entry: JSONObject): Record<string, unknown> => {
    return service["buildFluentAttributes"](entry) as Record<string, unknown>;
  };
  test("extracts top-level scalar fields with fluentd. prefix", () => {
    const attrs: Record<string, unknown> = toAttributes({
      message: "body text",
      stream: "stdout",
      tag: "kube.var.log",
    });
    // 'message' is excluded (it's a body field)
    expect(attrs["fluentd.message"]).toBeUndefined();
    // other fields are included
    expect(attrs["fluentd.stream"]).toBe("stdout");
    expect(attrs["fluentd.tag"]).toBe("kube.var.log");
  });
  test("flattens nested objects with dot notation", () => {
    const attrs: Record<string, unknown> = toAttributes({
      message: "log",
      kubernetes: {
        namespace_name: "default",
        pod_name: "my-app-xyz",
        container_name: "app",
        labels: {
          app: "my-app",
          version: "v1",
        },
      },
    });
    expect(attrs["fluentd.kubernetes.namespace_name"]).toBe("default");
    expect(attrs["fluentd.kubernetes.pod_name"]).toBe("my-app-xyz");
    expect(attrs["fluentd.kubernetes.container_name"]).toBe("app");
    expect(attrs["fluentd.kubernetes.labels.app"]).toBe("my-app");
    expect(attrs["fluentd.kubernetes.labels.version"]).toBe("v1");
  });
  test("serializes arrays as JSON strings", () => {
    const attrs: Record<string, unknown> = toAttributes({
      message: "log",
      tags: ["web", "production"],
    });
    expect(attrs["fluentd.tags"]).toBe('["web","production"]');
  });
  test("handles boolean and numeric values", () => {
    // Scalars keep their native JS types; they are not coerced to strings.
    const attrs: Record<string, unknown> = toAttributes({
      message: "log",
      count: 42,
      success: true,
    });
    expect(attrs["fluentd.count"]).toBe(42);
    expect(attrs["fluentd.success"]).toBe(true);
  });
  test("excludes all body, severity, trace, and span fields", () => {
    const attrs: Record<string, unknown> = toAttributes({
      message: "body",
      log: "also body",
      level: "info",
      severity: "info",
      trace_id: "abc",
      traceId: "abc",
      span_id: "def",
      spanId: "def",
      custom_field: "should be kept",
    });
    expect(attrs["fluentd.message"]).toBeUndefined();
    expect(attrs["fluentd.log"]).toBeUndefined();
    expect(attrs["fluentd.level"]).toBeUndefined();
    expect(attrs["fluentd.severity"]).toBeUndefined();
    expect(attrs["fluentd.trace_id"]).toBeUndefined();
    expect(attrs["fluentd.traceId"]).toBeUndefined();
    expect(attrs["fluentd.span_id"]).toBeUndefined();
    expect(attrs["fluentd.spanId"]).toBeUndefined();
    expect(attrs["fluentd.custom_field"]).toBe("should be kept");
  });
  test("skips null and undefined values", () => {
    const attrs: Record<string, unknown> = toAttributes({
      message: "log",
      null_field: null,
      valid_field: "kept",
    });
    expect(attrs["fluentd.null_field"]).toBeUndefined();
    expect(attrs["fluentd.valid_field"]).toBe("kept");
  });
});
// End-to-end checks over realistic Kubernetes-shaped payloads: exercises
// normalization, body extraction, severity mapping, and attribute flattening
// together rather than in isolation.
describe("full Kubernetes Fluent Bit payload", () => {
  test("correctly processes a typical Kubernetes log entry", () => {
    const k8sPayload: JSONObject = {
      log: "2024-01-15T10:30:00.123Z ERROR Connection refused to database\n",
      stream: "stderr",
      time: "2024-01-15T10:30:00.123456789Z",
      level: "error",
      kubernetes: {
        pod_name: "api-server-7b9f4c8d5-xk2m9",
        namespace_name: "production",
        container_name: "api-server",
        pod_id: "a1b2c3d4-e5f6-7890-abcd-ef1234567890",
        labels: {
          app: "api-server",
          "app.kubernetes.io/version": "2.1.0",
          team: "platform",
        },
        host: "node-pool-1-abc",
      },
    };
    // Test normalization preserves the object
    const entries: Array<JSONObject> = service["normalizeLogEntries"](
      k8sPayload,
    ) as Array<JSONObject>;
    expect(entries).toHaveLength(1);
    expect(entries[0]).toEqual(k8sPayload);
    // Test body extraction: the 'log' field should be used verbatim,
    // trailing newline included.
    const body: string = service["extractBodyFromEntry"](
      k8sPayload,
    ) as string;
    expect(body).toBe(
      "2024-01-15T10:30:00.123Z ERROR Connection refused to database\n",
    );
    // Test severity extraction
    const severity: { number: number; text: LogSeverity } = service[
      "extractSeverityFromEntry"
    ](k8sPayload) as { number: number; text: LogSeverity };
    expect(severity.text).toBe(LogSeverity.Error);
    expect(severity.number).toBe(17);
    // Test attributes extraction
    const attrs: Record<string, unknown> = service["buildFluentAttributes"](
      k8sPayload,
    ) as Record<string, unknown>;
    // Body and severity fields excluded
    expect(attrs["fluentd.log"]).toBeUndefined();
    expect(attrs["fluentd.level"]).toBeUndefined();
    // Other fields preserved
    expect(attrs["fluentd.stream"]).toBe("stderr");
    expect(attrs["fluentd.time"]).toBe("2024-01-15T10:30:00.123456789Z");
    // Kubernetes metadata flattened (note: label keys containing dots are
    // flattened literally, producing multi-dot attribute keys)
    expect(attrs["fluentd.kubernetes.pod_name"]).toBe(
      "api-server-7b9f4c8d5-xk2m9",
    );
    expect(attrs["fluentd.kubernetes.namespace_name"]).toBe("production");
    expect(attrs["fluentd.kubernetes.container_name"]).toBe("api-server");
    expect(attrs["fluentd.kubernetes.pod_id"]).toBe(
      "a1b2c3d4-e5f6-7890-abcd-ef1234567890",
    );
    expect(attrs["fluentd.kubernetes.labels.app"]).toBe("api-server");
    expect(attrs["fluentd.kubernetes.labels.app.kubernetes.io/version"]).toBe(
      "2.1.0",
    );
    expect(attrs["fluentd.kubernetes.labels.team"]).toBe("platform");
    expect(attrs["fluentd.kubernetes.host"]).toBe("node-pool-1-abc");
  });
  // A record wrapped under a top-level "json" key should be unwrapped
  // during normalization so its fields appear at the top level.
  test("handles Fluentd json-wrapped Kubernetes payload", () => {
    const payload: JSONObject = {
      json: {
        log: "Application started successfully",
        stream: "stdout",
        level: "info",
        kubernetes: {
          namespace_name: "staging",
          pod_name: "web-abc123",
        },
      },
    };
    const entries: Array<JSONObject> = service["normalizeLogEntries"](
      payload,
    ) as Array<JSONObject>;
    expect(entries).toHaveLength(1);
    expect(entries[0]!["log"]).toBe("Application started successfully");
    expect((entries[0]!["kubernetes"] as JSONObject)["namespace_name"]).toBe(
      "staging",
    );
  });
  // An array payload normalizes to one entry per element; severity is then
  // resolved independently for each entry.
  test("handles batch of Fluentd log entries", () => {
    const payload: Array<JSONObject> = [
      {
        message: "Request received",
        level: "info",
        kubernetes: { pod_name: "web-1" },
      },
      {
        message: "Processing failed",
        level: "error",
        kubernetes: { pod_name: "web-1" },
      },
      {
        message: "Retry succeeded",
        level: "warn",
        kubernetes: { pod_name: "web-1" },
      },
    ];
    const entries: Array<JSONObject> = service["normalizeLogEntries"](
      payload,
    ) as Array<JSONObject>;
    expect(entries).toHaveLength(3);
    const sev0: { text: LogSeverity } = service["extractSeverityFromEntry"](
      entries[0]!,
    ) as { text: LogSeverity };
    const sev1: { text: LogSeverity } = service["extractSeverityFromEntry"](
      entries[1]!,
    ) as { text: LogSeverity };
    const sev2: { text: LogSeverity } = service["extractSeverityFromEntry"](
      entries[2]!,
    ) as { text: LogSeverity };
    expect(sev0.text).toBe(LogSeverity.Information);
    expect(sev1.text).toBe(LogSeverity.Error);
    expect(sev2.text).toBe(LogSeverity.Warning);
  });
});
});

View File

@@ -1,457 +0,0 @@
import OtelProfilesIngestService from "../../Services/OtelProfilesIngestService";
import { JSONObject, JSONArray } from "Common/Types/JSON";
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const service: any = OtelProfilesIngestService as any;
describe("OtelProfilesIngestService", () => {
describe("resolveStackFrames", () => {
  // Shared string table; indices below are referenced by the function table.
  const baseStringTable: Array<string> = [
    "", // 0 - empty
    "main", // 1
    "app.go", // 2
    "runtime.main", // 3
    "runtime/proc.go", // 4
    "handleRequest", // 5
    "server.go", // 6
    "kernel", // 7
    "native", // 8
    "go", // 9
  ];
  const baseFunctionTable: JSONArray = [
    { name: 1, systemName: 1, filename: 2, startLine: 10 }, // 0: main@app.go
    { name: 3, systemName: 3, filename: 4, startLine: 100 }, // 1: runtime.main@runtime/proc.go
    { name: 5, systemName: 5, filename: 6, startLine: 50 }, // 2: handleRequest@server.go
  ];
  const baseLocationTable: JSONArray = [
    {
      mappingIndex: 0,
      address: 4096,
      line: [{ functionIndex: 0, line: 15, column: 0 }],
      isFolded: false,
      typeIndex: 0,
      attributeIndices: [],
    }, // 0: main@app.go:15
    {
      mappingIndex: 0,
      address: 8192,
      line: [{ functionIndex: 1, line: 120, column: 0 }],
      isFolded: false,
      typeIndex: 0,
      attributeIndices: [],
    }, // 1: runtime.main@runtime/proc.go:120
    {
      mappingIndex: 0,
      address: 12288,
      line: [{ functionIndex: 2, line: 55, column: 0 }],
      isFolded: false,
      typeIndex: 0,
      attributeIndices: [],
    }, // 2: handleRequest@server.go:55
  ];
  const baseAttributeTable: JSONArray = [
    { key: "profile.frame.type", value: { stringValue: "go" } },
  ];
  // Invokes the private frame resolver, defaulting to the base tables so
  // individual tests only specify what they override.
  const resolveFrames = (overrides: {
    sample: JSONObject;
    stackTable?: JSONArray;
    locationTable?: JSONArray;
    functionTable?: JSONArray;
    stringTable?: Array<string>;
    attributeTable?: JSONArray;
  }): { frames: Array<string>; frameTypes: Array<string> } => {
    return service["resolveStackFrames"]({
      sample: overrides.sample,
      stackTable: overrides.stackTable ?? [],
      locationTable: overrides.locationTable ?? baseLocationTable,
      functionTable: overrides.functionTable ?? baseFunctionTable,
      stringTable: overrides.stringTable ?? baseStringTable,
      attributeTable: overrides.attributeTable ?? baseAttributeTable,
    }) as { frames: Array<string>; frameTypes: Array<string> };
  };
  test("resolves simple stack via stack_table", () => {
    const result: { frames: Array<string>; frameTypes: Array<string> } =
      resolveFrames({
        sample: { stackIndex: 0 },
        stackTable: [{ locationIndices: [0, 1] }], // stack 0: main, runtime.main
      });
    expect(result.frames).toHaveLength(2);
    expect(result.frames[0]).toBe("main@app.go:15");
    expect(result.frames[1]).toBe("runtime.main@runtime/proc.go:120");
    expect(result.frameTypes[0]).toBe("go");
    expect(result.frameTypes[1]).toBe("go");
  });
  test("resolves stack with three frames", () => {
    const result: { frames: Array<string>; frameTypes: Array<string> } =
      resolveFrames({
        sample: { stackIndex: 0 },
        stackTable: [{ locationIndices: [2, 0, 1] }], // handleRequest -> main -> runtime.main
      });
    expect(result.frames).toHaveLength(3);
    expect(result.frames[0]).toBe("handleRequest@server.go:55");
    expect(result.frames[1]).toBe("main@app.go:15");
    expect(result.frames[2]).toBe("runtime.main@runtime/proc.go:120");
  });
  test("handles inline frames (multiple lines per location)", () => {
    const inlineLocationTable: JSONArray = [
      {
        mappingIndex: 0,
        address: 4096,
        line: [
          { functionIndex: 0, line: 15, column: 0 }, // main@app.go:15
          { functionIndex: 2, line: 55, column: 0 }, // handleRequest@server.go:55 (inlined)
        ],
        isFolded: false,
        typeIndex: 0,
        attributeIndices: [],
      },
    ];
    const result: { frames: Array<string>; frameTypes: Array<string> } =
      resolveFrames({
        sample: { stackIndex: 0 },
        stackTable: [{ locationIndices: [0] }],
        locationTable: inlineLocationTable,
      });
    // Inline frames should expand into separate frames
    expect(result.frames).toHaveLength(2);
    expect(result.frames[0]).toBe("main@app.go:15");
    expect(result.frames[1]).toBe("handleRequest@server.go:55");
  });
  test("handles location without line info (uses hex address)", () => {
    const addressOnlyLocationTable: JSONArray = [
      {
        mappingIndex: 0,
        address: 65535,
        line: [],
        isFolded: false,
        typeIndex: 0,
        attributeIndices: [],
      },
    ];
    const result: { frames: Array<string>; frameTypes: Array<string> } =
      resolveFrames({
        sample: { stackIndex: 0 },
        stackTable: [{ locationIndices: [0] }],
        locationTable: addressOnlyLocationTable,
      });
    expect(result.frames).toHaveLength(1);
    expect(result.frames[0]).toBe("0xffff");
  });
  test("handles empty stack", () => {
    const result: { frames: Array<string>; frameTypes: Array<string> } =
      resolveFrames({
        sample: { stackIndex: 0 },
        stackTable: [{ locationIndices: [] }],
      });
    expect(result.frames).toHaveLength(0);
    expect(result.frameTypes).toHaveLength(0);
  });
  test("handles out-of-bounds location index gracefully", () => {
    const result: { frames: Array<string>; frameTypes: Array<string> } =
      resolveFrames({
        sample: { stackIndex: 0 },
        stackTable: [{ locationIndices: [999] }],
      });
    expect(result.frames).toHaveLength(1);
    expect(result.frames[0]).toBe("<unknown>");
    expect(result.frameTypes[0]).toBe("unknown");
  });
  test("falls back to locationsStartIndex/locationsLength when no stackIndex", () => {
    const result: { frames: Array<string>; frameTypes: Array<string> } =
      resolveFrames({
        sample: {
          locationsStartIndex: 0,
          locationsLength: 2,
        },
        stackTable: [],
      });
    expect(result.frames).toHaveLength(2);
    expect(result.frames[0]).toBe("main@app.go:15");
    expect(result.frames[1]).toBe("runtime.main@runtime/proc.go:120");
  });
  test("handles function without filename", () => {
    const noFileStringTable: Array<string> = [
      "", // 0
      "anonymous", // 1
    ];
    const noFileFunctionTable: JSONArray = [
      { name: 1, systemName: 1, filename: 0, startLine: 0 }, // anonymous (no file)
    ];
    const noFileLocationTable: JSONArray = [
      {
        mappingIndex: 0,
        address: 4096,
        line: [{ functionIndex: 0, line: 0, column: 0 }],
        isFolded: false,
        typeIndex: 0,
        attributeIndices: [],
      },
    ];
    const result: { frames: Array<string>; frameTypes: Array<string> } =
      resolveFrames({
        sample: { stackIndex: 0 },
        stackTable: [{ locationIndices: [0] }],
        locationTable: noFileLocationTable,
        functionTable: noFileFunctionTable,
        stringTable: noFileStringTable,
        attributeTable: [],
      });
    expect(result.frames).toHaveLength(1);
    // Should just be function name without file or line
    expect(result.frames[0]).toBe("anonymous");
  });
});
describe("safeParseUnixNano", () => {
  // Shape returned by the private parser under test.
  type ParsedTime = {
    unixNano: number;
    nano: string;
    iso: string;
    date: Date;
  };
  // Invokes the private parser with a fixed field label.
  const parseNanos = (value: unknown): ParsedTime => {
    return service["safeParseUnixNano"](value, "test") as ParsedTime;
  };
  test("parses numeric value correctly", () => {
    const nanos: number = 1700000000000000000;
    const parsed: ParsedTime = parseNanos(nanos);
    expect(parsed.unixNano).toBe(nanos);
    expect(parsed.nano).toBe(nanos.toString());
    expect(parsed.date).toBeInstanceOf(Date);
  });
  test("parses string value correctly", () => {
    const parsed: ParsedTime = parseNanos("1700000000000000000");
    expect(parsed.unixNano).toBe(1700000000000000000);
    expect(parsed.date).toBeInstanceOf(Date);
  });
  test("falls back to current time for undefined", () => {
    const parsed: ParsedTime = parseNanos(undefined);
    expect(parsed.unixNano).toBeGreaterThan(0);
    expect(parsed.date).toBeInstanceOf(Date);
  });
  test("falls back to current time for NaN string", () => {
    const parsed: ParsedTime = parseNanos("not-a-number");
    expect(parsed.unixNano).toBeGreaterThan(0);
    expect(parsed.date).toBeInstanceOf(Date);
  });
  test("falls back to current time for Infinity", () => {
    const parsed: ParsedTime = parseNanos(Infinity);
    expect(parsed.unixNano).toBeGreaterThan(0);
    expect(parsed.date).toBeInstanceOf(Date);
  });
});
describe("convertBase64ToHexSafe", () => {
  // Thin wrapper over the private converter under test.
  const toHex = (input: string | undefined): string => {
    return service["convertBase64ToHexSafe"](input) as string;
  };
  test("returns empty string for undefined", () => {
    expect(toHex(undefined)).toBe("");
  });
  test("returns empty string for empty string", () => {
    expect(toHex("")).toBe("");
  });
  test("converts valid base64 to hex", () => {
    // "AQID" is base64 for bytes [1, 2, 3] which is hex "010203"
    expect(toHex("AQID")).toBe("010203");
  });
});
describe("buildProfileRow", () => {
  // Minimal ObjectID stand-in: the row builder only consumes toString().
  const idStub = (value: string): { toString: () => string } => {
    return {
      toString: () => {
        return value;
      },
    };
  };
  test("builds profile row with all fields", () => {
    const row: JSONObject = service["buildProfileRow"]({
      projectId: idStub("proj-123"),
      serviceId: idStub("svc-456"),
      profileId: "profile-789",
      traceId: "trace-abc",
      spanId: "span-def",
      startTime: {
        unixNano: 1700000000000000000,
        nano: "1700000000000000000",
        iso: "2023-11-14T22:13:20.000Z",
        date: new Date("2023-11-14T22:13:20.000Z"),
      },
      endTime: {
        unixNano: 1700000001000000000,
        nano: "1700000001000000000",
        iso: "2023-11-14T22:13:21.000Z",
        date: new Date("2023-11-14T22:13:21.000Z"),
      },
      durationNano: 1000000000,
      profileType: "cpu",
      unit: "nanoseconds",
      periodType: "cpu",
      period: 10000000,
      attributes: { "resource.service.name": "my-service" },
      attributeKeys: ["resource.service.name"],
      sampleCount: 100,
      originalPayloadFormat: "pprofext",
      dataRetentionInDays: 15,
    });
    // Identity fields pass through as strings.
    expect(row["projectId"]).toBe("proj-123");
    expect(row["serviceId"]).toBe("svc-456");
    expect(row["profileId"]).toBe("profile-789");
    expect(row["traceId"]).toBe("trace-abc");
    expect(row["spanId"]).toBe("span-def");
    // Profile metadata passes through unchanged.
    expect(row["profileType"]).toBe("cpu");
    expect(row["unit"]).toBe("nanoseconds");
    expect(row["periodType"]).toBe("cpu");
    expect(row["sampleCount"]).toBe(100);
    expect(row["originalPayloadFormat"]).toBe("pprofext");
    // Generated fields are present.
    expect(row["_id"]).toBeDefined();
    expect(row["retentionDate"]).toBeDefined();
  });
});
describe("buildSampleRow", () => {
  // Minimal ObjectID stand-in: the row builder only consumes toString().
  const idStub = (value: string): { toString: () => string } => {
    return {
      toString: () => {
        return value;
      },
    };
  };
  test("builds sample row with all fields", () => {
    const row: JSONObject = service["buildSampleRow"]({
      projectId: idStub("proj-123"),
      serviceId: idStub("svc-456"),
      profileId: "profile-789",
      traceId: "trace-abc",
      spanId: "span-def",
      time: {
        unixNano: 1700000000000000000,
        nano: "1700000000000000000",
        iso: "2023-11-14T22:13:20.000Z",
        date: new Date("2023-11-14T22:13:20.000Z"),
      },
      stacktrace: ["main@app.go:15", "runtime.main@runtime/proc.go:120"],
      stacktraceHash: "abc123",
      frameTypes: ["go", "go"],
      value: 50000,
      profileType: "cpu",
      labels: { "thread.name": "main" },
      dataRetentionInDays: 15,
    });
    // Identity fields pass through as strings.
    expect(row["projectId"]).toBe("proj-123");
    expect(row["serviceId"]).toBe("svc-456");
    expect(row["profileId"]).toBe("profile-789");
    expect(row["traceId"]).toBe("trace-abc");
    // Stack data passes through unchanged.
    expect(row["stacktrace"]).toEqual([
      "main@app.go:15",
      "runtime.main@runtime/proc.go:120",
    ]);
    expect(row["stacktraceHash"]).toBe("abc123");
    expect(row["frameTypes"]).toEqual(["go", "go"]);
    // The numeric sample value is stored stringified.
    expect(row["value"]).toBe("50000");
    expect(row["profileType"]).toBe("cpu");
    expect(row["labels"]).toEqual({ "thread.name": "main" });
    // Generated fields are present.
    expect(row["_id"]).toBeDefined();
    expect(row["retentionDate"]).toBeDefined();
  });
});
});

View File

@@ -1,359 +0,0 @@
import ExceptionUtil from "../../Utils/Exception";
import ObjectID from "Common/Types/ObjectID";
describe("ExceptionUtil", () => {
describe("normalizeForFingerprint", () => {
  /*
   * Normalizes both messages and asserts they collapse to the same string.
   * When `expected` is provided, also asserts the exact normalized form.
   */
  const expectSameNormalization = (
    message1: string,
    message2: string,
    expected?: string,
  ): void => {
    const normalized1: string =
      ExceptionUtil.normalizeForFingerprint(message1);
    const normalized2: string =
      ExceptionUtil.normalizeForFingerprint(message2);
    expect(normalized1).toBe(normalized2);
    if (expected !== undefined) {
      expect(normalized1).toBe(expected);
    }
  };
  test("normalizes Stripe subscription IDs", () => {
    expectSameNormalization(
      "No such subscription: 'sub_1POgR8ANuQdJ93r7dySVHs4K'",
      "No such subscription: 'sub_1PRZvTANuQdJ93r7K1nhUFZ9'",
      "No such subscription: '<STRIPE_ID>'",
    );
  });
  test("normalizes Stripe customer IDs", () => {
    expectSameNormalization(
      "Customer cus_ABC123DEF456GHI not found",
      "Customer cus_XYZ789JKL012MNO not found",
      "Customer <STRIPE_ID> not found",
    );
  });
  test("normalizes UUIDs", () => {
    expectSameNormalization(
      "Failed to find resource 550e8400-e29b-41d4-a716-446655440000",
      "Failed to find resource a1b2c3d4-e5f6-7890-abcd-ef1234567890",
      "Failed to find resource <UUID>",
    );
  });
  test("normalizes MongoDB ObjectIDs", () => {
    expectSameNormalization(
      "Document not found: 507f1f77bcf86cd799439011",
      "Document not found: 60a1b2c3d4e5f6a7b8c9d0e1",
      "Document not found: <OBJECT_ID>",
    );
  });
  test("normalizes IP addresses", () => {
    expectSameNormalization(
      "Connection refused from 192.168.1.100",
      "Connection refused from 10.0.0.50",
      "Connection refused from <IP>",
    );
  });
  test("normalizes email addresses", () => {
    expectSameNormalization(
      "Invalid email: user@example.com",
      "Invalid email: admin@company.org",
      "Invalid email: <EMAIL>",
    );
  });
  test("normalizes timestamps", () => {
    expectSameNormalization(
      "Request failed at 2024-03-15T14:30:00.000Z",
      "Request failed at 2024-12-01T09:15:30.500Z",
      "Request failed at <TIMESTAMP>",
    );
  });
  test("normalizes Unix timestamps", () => {
    expectSameNormalization(
      "Event occurred at 1710511800000",
      "Event occurred at 1733059530500",
      "Event occurred at <TIMESTAMP>",
    );
  });
  test("normalizes memory addresses", () => {
    expectSameNormalization(
      "Segmentation fault at 0x7fff5fbff8c0",
      "Segmentation fault at 0x00007ffe12345678",
      "Segmentation fault at <MEMORY_ADDR>",
    );
  });
  test("normalizes session IDs", () => {
    expectSameNormalization(
      "Session expired: session_id=abc123def456",
      "Session expired: session_id=xyz789jkl012",
      "Session expired: session_id=<SESSION>",
    );
  });
  test("normalizes request IDs", () => {
    expectSameNormalization(
      "Request failed: request_id=req_abc123",
      "Request failed: request_id=req_xyz789",
      "Request failed: request_id=<REQUEST>",
    );
  });
  test("normalizes large numbers", () => {
    /*
     * Large numbers (8+ digits) may match hex pattern since 0-9 are valid hex
     * The important thing is both normalize to the same value
     */
    expectSameNormalization(
      "User 8234567890 not found",
      "User 9876543210 not found",
    );
  });
  test("normalizes 7-digit numbers as NUMBER", () => {
    // 7-digit numbers don't match hex pattern (8+ chars) so fall through to NUMBER
    expectSameNormalization(
      "Error code 1234567",
      "Error code 9876543",
      "Error code <NUMBER>",
    );
  });
  test("handles empty string", () => {
    expect(ExceptionUtil.normalizeForFingerprint("")).toBe("");
  });
  test("preserves meaningful text while normalizing IDs", () => {
    expect(
      ExceptionUtil.normalizeForFingerprint(
        "Failed to process payment for customer cus_ABC123DEF456GHI: Card declined",
      ),
    ).toBe("Failed to process payment for customer <STRIPE_ID>: Card declined");
  });
  test("normalizes multiple dynamic values in same message", () => {
    expectSameNormalization(
      "User user@example.com (id=12345678) failed to access resource 550e8400-e29b-41d4-a716-446655440000",
      "User admin@company.org (id=87654321) failed to access resource a1b2c3d4-e5f6-7890-abcd-ef1234567890",
    );
  });
  test("normalizes JWT tokens", () => {
    expectSameNormalization(
      "Invalid token: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.SflKxwRJSMeKKF2QT4fwpMeJf36POk6yJV_adQssw5c",
      "Invalid token: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiI5ODc2NTQzMjEwIiwibmFtZSI6IkphbmUgRG9lIiwiaWF0IjoxNjE2MjM5MDIyfQ.DifferentSignatureHere123456789",
      "Invalid token: <JWT>",
    );
  });
  test("normalizes generic service IDs with prefix_alphanumeric pattern", () => {
    expectSameNormalization(
      "Failed to find resource aws_abc123def456",
      "Failed to find resource aws_xyz789jkl012",
      "Failed to find resource <SERVICE_ID>",
    );
  });
});
describe("getFingerprint", () => {
  test("generates same fingerprint for exceptions with different dynamic IDs", () => {
    const projectId: ObjectID = ObjectID.generate();
    const serviceId: ObjectID = ObjectID.generate();
    // Same error shape, different Stripe IDs: must group together.
    const first: string = ExceptionUtil.getFingerprint({
      projectId,
      serviceId,
      message: "No such subscription: 'sub_1POgR8ANuQdJ93r7dySVHs4K'",
      exceptionType: "StripeError",
      stackTrace: "at processPayment (payment.js:100)",
    });
    const second: string = ExceptionUtil.getFingerprint({
      projectId,
      serviceId,
      message: "No such subscription: 'sub_1PRZvTANuQdJ93r7K1nhUFZ9'",
      exceptionType: "StripeError",
      stackTrace: "at processPayment (payment.js:100)",
    });
    expect(first).toBe(second);
  });
  test("generates different fingerprints for different exception types", () => {
    const projectId: ObjectID = ObjectID.generate();
    const serviceId: ObjectID = ObjectID.generate();
    const first: string = ExceptionUtil.getFingerprint({
      projectId,
      serviceId,
      message: "No such subscription: 'sub_1POgR8ANuQdJ93r7dySVHs4K'",
      exceptionType: "StripeError",
      stackTrace: "at processPayment (payment.js:100)",
    });
    const second: string = ExceptionUtil.getFingerprint({
      projectId,
      serviceId,
      message: "No such subscription: 'sub_1PRZvTANuQdJ93r7K1nhUFZ9'",
      exceptionType: "PaymentError",
      stackTrace: "at processPayment (payment.js:100)",
    });
    expect(first).not.toBe(second);
  });
  test("generates different fingerprints for different services", () => {
    const projectId: ObjectID = ObjectID.generate();
    const serviceId1: ObjectID = ObjectID.generate();
    const serviceId2: ObjectID = ObjectID.generate();
    const first: string = ExceptionUtil.getFingerprint({
      projectId,
      serviceId: serviceId1,
      message: "No such subscription: 'sub_1POgR8ANuQdJ93r7dySVHs4K'",
      exceptionType: "StripeError",
    });
    const second: string = ExceptionUtil.getFingerprint({
      projectId,
      serviceId: serviceId2,
      message: "No such subscription: 'sub_1PRZvTANuQdJ93r7K1nhUFZ9'",
      exceptionType: "StripeError",
    });
    expect(first).not.toBe(second);
  });
  test("generates different fingerprints for different projects", () => {
    const projectId1: ObjectID = ObjectID.generate();
    const projectId2: ObjectID = ObjectID.generate();
    const serviceId: ObjectID = ObjectID.generate();
    const first: string = ExceptionUtil.getFingerprint({
      projectId: projectId1,
      serviceId,
      message: "Error occurred",
      exceptionType: "Error",
    });
    const second: string = ExceptionUtil.getFingerprint({
      projectId: projectId2,
      serviceId,
      message: "Error occurred",
      exceptionType: "Error",
    });
    expect(first).not.toBe(second);
  });
  test("generates same fingerprint for similar stack traces with different line numbers", () => {
    const projectId: ObjectID = ObjectID.generate();
    const serviceId: ObjectID = ObjectID.generate();
    // Stack traces might have memory addresses or other dynamic values
    const first: string = ExceptionUtil.getFingerprint({
      projectId,
      serviceId,
      message: "NullPointerException",
      exceptionType: "NullPointerException",
      stackTrace:
        "at com.example.MyClass.method(MyClass.java:42)\nat 0x7fff5fbff8c0",
    });
    const second: string = ExceptionUtil.getFingerprint({
      projectId,
      serviceId,
      message: "NullPointerException",
      exceptionType: "NullPointerException",
      stackTrace:
        "at com.example.MyClass.method(MyClass.java:42)\nat 0x00007ffe12345678",
    });
    expect(first).toBe(second);
  });
});
});

View File

@@ -1,289 +0,0 @@
import StackTraceParser, {
ParsedStackTrace,
StackFrame,
} from "../../Utils/StackTraceParser";
// Unit tests for StackTraceParser.parse(): verifies that stack traces emitted by
// JavaScript/Node.js, Python, Java, Go, Ruby, C#/.NET and PHP runtimes are split
// into structured frames (functionName, fileName, lineNumber, columnNumber), and
// that the inApp heuristic separates application code from library/vendor code.
// NOTE(review): fixture indentation was stripped by the diff renderer; the
// original fixtures presumably indent continuation lines — confirm against the
// file on disk before relying on whitespace-sensitive assertions.
describe("StackTraceParser", () => {
// Basic contract: empty / null-ish input yields zero frames, and the raw
// input string is always preserved on the result.
describe("parse", () => {
test("returns empty frames for empty input", () => {
const result: ParsedStackTrace = StackTraceParser.parse("");
expect(result.frames).toHaveLength(0);
expect(result.raw).toBe("");
});
test("returns empty frames for null-ish input", () => {
// Cast forces undefined through the string-typed signature to exercise
// the parser's defensive handling of missing input.
const result: ParsedStackTrace = StackTraceParser.parse(
undefined as unknown as string,
);
expect(result.frames).toHaveLength(0);
});
test("preserves raw stack trace", () => {
const rawTrace: string =
"Error: something\n at foo (/app/bar.js:10:5)";
const result: ParsedStackTrace = StackTraceParser.parse(rawTrace);
expect(result.raw).toBe(rawTrace);
});
});
// V8-style "at func (file:line:col)" frames; node_modules paths are library
// code, and frames without a function name fall back to "<anonymous>".
describe("JavaScript/Node.js stack traces", () => {
test("parses standard Node.js stack trace", () => {
const trace: string = `TypeError: Cannot read property 'id' of undefined
at getUser (/app/src/services/user.ts:42:15)
at processRequest (/app/src/controllers/api.ts:128:20)
at Layer.handle [as handle_request] (node_modules/express/lib/router/layer.js:95:5)
at next (node_modules/express/lib/router/route.js:144:13)`;
const result: ParsedStackTrace = StackTraceParser.parse(trace);
expect(result.frames.length).toBeGreaterThanOrEqual(2);
// First frame should be getUser
const firstFrame: StackFrame | undefined = result.frames[0];
expect(firstFrame).toBeDefined();
expect(firstFrame!.functionName).toBe("getUser");
expect(firstFrame!.fileName).toBe("/app/src/services/user.ts");
expect(firstFrame!.lineNumber).toBe(42);
expect(firstFrame!.columnNumber).toBe(15);
expect(firstFrame!.inApp).toBe(true);
});
test("marks node_modules as library code", () => {
const trace: string = `Error: test
at handler (/app/src/handler.js:10:5)
at Layer.handle (node_modules/express/lib/router/layer.js:95:5)`;
const result: ParsedStackTrace = StackTraceParser.parse(trace);
const expressFrame: StackFrame | undefined = result.frames.find(
(f: StackFrame) => {
return f.fileName.includes("express");
},
);
expect(expressFrame).toBeDefined();
expect(expressFrame!.inApp).toBe(false);
});
test("parses anonymous function frames", () => {
// Frame has only a location ("at file:line:col"), no function name.
const trace: string = `Error: test
at /app/src/index.js:5:10`;
const result: ParsedStackTrace = StackTraceParser.parse(trace);
expect(result.frames.length).toBeGreaterThanOrEqual(1);
expect(result.frames[0]!.functionName).toBe("<anonymous>");
});
});
// CPython tracebacks: 'File "...", line N, in func' pairs; site-packages
// paths are classified as library code.
describe("Python stack traces", () => {
test("parses standard Python traceback", () => {
const trace: string = `Traceback (most recent call last):
File "/app/main.py", line 42, in handle_request
result = process_data(data)
File "/app/utils.py", line 15, in process_data
return data["key"]
KeyError: 'key'`;
const result: ParsedStackTrace = StackTraceParser.parse(trace);
expect(result.frames.length).toBeGreaterThanOrEqual(2);
const firstFrame: StackFrame | undefined = result.frames[0];
expect(firstFrame).toBeDefined();
expect(firstFrame!.fileName).toBe("/app/main.py");
expect(firstFrame!.lineNumber).toBe(42);
expect(firstFrame!.functionName).toBe("handle_request");
expect(firstFrame!.inApp).toBe(true);
});
test("marks site-packages as library code", () => {
const trace: string = `Traceback (most recent call last):
File "/usr/lib/python3.9/site-packages/django/core/handlers.py", line 47, in inner
response = get_response(request)
File "/app/views.py", line 10, in index
raise ValueError("test")`;
const result: ParsedStackTrace = StackTraceParser.parse(trace);
const djangoFrame: StackFrame | undefined = result.frames.find(
(f: StackFrame) => {
return f.fileName.includes("site-packages");
},
);
expect(djangoFrame).toBeDefined();
expect(djangoFrame!.inApp).toBe(false);
const appFrame: StackFrame | undefined = result.frames.find(
(f: StackFrame) => {
return f.fileName === "/app/views.py";
},
);
expect(appFrame).toBeDefined();
expect(appFrame!.inApp).toBe(true);
});
});
// JVM frames: "at pkg.Class.method(File.java:line)"; java.* / framework
// packages and "Native Method" entries are treated as library code.
describe("Java stack traces", () => {
test("parses standard Java stack trace", () => {
const trace: string = `java.lang.NullPointerException: Cannot invoke method on null
at com.myapp.service.UserService.getUser(UserService.java:42)
at com.myapp.controller.ApiController.handleRequest(ApiController.java:128)
at org.springframework.web.servlet.FrameworkServlet.service(FrameworkServlet.java:897)
at javax.servlet.http.HttpServlet.service(HttpServlet.java:750)`;
const result: ParsedStackTrace = StackTraceParser.parse(trace);
expect(result.frames.length).toBeGreaterThanOrEqual(2);
const firstFrame: StackFrame | undefined = result.frames[0];
expect(firstFrame).toBeDefined();
// Java keeps the qualified class in functionName; fileName is just the
// short source-file name from the parentheses.
expect(firstFrame!.functionName).toContain("UserService.getUser");
expect(firstFrame!.fileName).toBe("UserService.java");
expect(firstFrame!.lineNumber).toBe(42);
});
test("marks standard Java libs as library code", () => {
const trace: string = `Exception
at com.myapp.Main.run(Main.java:10)
at java.lang.Thread.run(Thread.java:748)`;
const result: ParsedStackTrace = StackTraceParser.parse(trace);
const javaFrame: StackFrame | undefined = result.frames.find(
(f: StackFrame) => {
return f.functionName.startsWith("java.");
},
);
expect(javaFrame).toBeDefined();
expect(javaFrame!.inApp).toBe(false);
});
test("handles Native Method entries", () => {
const trace: string = `Exception
at sun.reflect.NativeMethodAccessorImpl.invoke(Native Method)`;
const result: ParsedStackTrace = StackTraceParser.parse(trace);
// Guarded: parser may choose to drop native frames entirely.
if (result.frames.length > 0) {
expect(result.frames[0]!.fileName).toBe("Native Method");
expect(result.frames[0]!.inApp).toBe(false);
}
});
});
// Go panics: function line followed by an indented "/path/file.go:line +0x…"
// location line.
describe("Go stack traces", () => {
test("parses standard Go stack trace", () => {
const trace: string = `goroutine 1 [running]:
main.handler(0xc0000b4000)
/app/main.go:42 +0x1a5
net/http.(*ServeMux).ServeHTTP(0xc0000b4000, 0x7f3a9c, 0xc0000b8000)
/usr/local/go/src/net/http/server.go:2387 +0x1a5`;
const result: ParsedStackTrace = StackTraceParser.parse(trace);
expect(result.frames.length).toBeGreaterThanOrEqual(1);
const appFrame: StackFrame | undefined = result.frames.find(
(f: StackFrame) => {
return f.fileName === "/app/main.go";
},
);
expect(appFrame).toBeDefined();
expect(appFrame!.lineNumber).toBe(42);
expect(appFrame!.inApp).toBe(true);
});
});
// Ruby backtraces: "file.rb:line:in 'method'"; gem paths are library code.
describe("Ruby stack traces", () => {
test("parses standard Ruby backtrace", () => {
const trace: string = `/app/controllers/users_controller.rb:42:in 'show'
/app/middleware/auth.rb:15:in 'call'
/usr/local/lib/ruby/gems/2.7.0/gems/rack-2.2.3/lib/rack/handler.rb:12:in 'call'`;
const result: ParsedStackTrace = StackTraceParser.parse(trace);
expect(result.frames.length).toBeGreaterThanOrEqual(2);
const firstFrame: StackFrame | undefined = result.frames[0];
expect(firstFrame).toBeDefined();
expect(firstFrame!.fileName).toBe("/app/controllers/users_controller.rb");
expect(firstFrame!.lineNumber).toBe(42);
expect(firstFrame!.functionName).toBe("show");
expect(firstFrame!.inApp).toBe(true);
});
test("marks gems as library code", () => {
const trace: string = `/usr/local/lib/ruby/gems/2.7.0/gems/rack-2.2.3/lib/rack/handler.rb:12:in 'call'`;
const result: ParsedStackTrace = StackTraceParser.parse(trace);
if (result.frames.length > 0) {
expect(result.frames[0]!.inApp).toBe(false);
}
});
});
// .NET frames: "at Ns.Class.Method(args) in /path/File.cs:line N".
describe("C#/.NET stack traces", () => {
test("parses .NET stack trace with file info", () => {
const trace: string = `System.NullReferenceException: Object reference not set
at MyApp.Services.UserService.GetUser(Int32 id) in /app/Services/UserService.cs:line 42
at MyApp.Controllers.ApiController.HandleRequest() in /app/Controllers/ApiController.cs:line 128
at System.Runtime.CompilerServices.TaskAwaiter.HandleNonSuccessAndDebuggerNotification(Task task)`;
const result: ParsedStackTrace = StackTraceParser.parse(trace);
expect(result.frames.length).toBeGreaterThanOrEqual(2);
const userServiceFrame: StackFrame | undefined = result.frames.find(
(f: StackFrame) => {
return f.fileName.includes("UserService.cs");
},
);
expect(userServiceFrame).toBeDefined();
expect(userServiceFrame!.lineNumber).toBe(42);
});
});
// PHP traces: "#N /path/file.php(line): Class->method()"; vendor/ paths are
// library code and the trailing "{main}" entry carries no frame data.
describe("PHP stack traces", () => {
test("parses standard PHP stack trace", () => {
const trace: string = `#0 /app/src/Controller/UserController.php(42): App\\Service\\UserService->getUser()
#1 /app/vendor/symfony/http-kernel/HttpKernel.php(128): App\\Controller\\UserController->show()
#2 {main}`;
const result: ParsedStackTrace = StackTraceParser.parse(trace);
expect(result.frames.length).toBeGreaterThanOrEqual(2);
const firstFrame: StackFrame | undefined = result.frames[0];
expect(firstFrame).toBeDefined();
expect(firstFrame!.fileName).toBe(
"/app/src/Controller/UserController.php",
);
expect(firstFrame!.lineNumber).toBe(42);
expect(firstFrame!.inApp).toBe(true);
});
test("marks vendor as library code", () => {
const trace: string = `#0 /app/vendor/symfony/http-kernel/HttpKernel.php(128): App\\Controller\\UserController->show()`;
const result: ParsedStackTrace = StackTraceParser.parse(trace);
if (result.frames.length > 0) {
expect(result.frames[0]!.inApp).toBe(false);
}
});
});
// Cross-language sanity checks of the inApp classifier.
describe("inApp detection", () => {
test("node_modules is not app code", () => {
const trace: string = `Error: test
at handler (node_modules/express/lib/router.js:10:5)`;
const result: ParsedStackTrace = StackTraceParser.parse(trace);
if (result.frames.length > 0) {
expect(result.frames[0]!.inApp).toBe(false);
}
});
test("application source is app code", () => {
const trace: string = `Error: test
at handler (/app/src/handler.ts:10:5)`;
const result: ParsedStackTrace = StackTraceParser.parse(trace);
if (result.frames.length > 0) {
expect(result.frames[0]!.inApp).toBe(true);
}
});
});
});

View File

@@ -1,57 +0,0 @@
import {
ParsedSyslogMessage,
parseSyslogMessage,
} from "../../Utils/SyslogParser";
// Unit tests for parseSyslogMessage(): covers RFC 5424 (versioned header with
// structured data), RFC 3164 (BSD-style), and free-form messages without a
// <PRI> prefix. Facility/severity are derived from priority as
// facility = pri >> 3 and severity = pri & 7.
describe("SyslogParser", () => {
test("parses RFC5424 message with structured data", () => {
const message: string =
"<34>1 2025-03-02T14:48:05.003Z mymachine app-name 1234 ID47 " +
'[exampleSDID@32473 iut="3" eventSource="Application" eventID="1011"][meta key="value"] ' +
"BOMAn application event log entry";
const parsed: ParsedSyslogMessage | null = parseSyslogMessage(message);
expect(parsed).not.toBeNull();
// Priority 34 => facility 4 (34 >> 3), severity 2 (34 & 7).
expect(parsed?.priority).toBe(34);
expect(parsed?.severity).toBe(2);
expect(parsed?.facility).toBe(4);
expect(parsed?.version).toBe(1);
expect(parsed?.hostname).toBe("mymachine");
expect(parsed?.appName).toBe("app-name");
expect(parsed?.procId).toBe("1234");
expect(parsed?.msgId).toBe("ID47");
expect(parsed?.timestamp?.toISOString()).toBe("2025-03-02T14:48:05.003Z");
// Per these assertions the parser normalizes "@" in SD-IDs to "_"
// ("exampleSDID@32473" -> "exampleSDID_32473").
expect(parsed?.structuredData?.["exampleSDID_32473"]?.["iut"]).toBe("3");
expect(parsed?.structuredData?.["meta"]?.["key"]).toBe("value");
// The leading "BOM" marker is stripped from the message body.
expect(parsed?.message).toBe("An application event log entry");
});
test("parses RFC3164 message", () => {
const message: string =
"<13>Feb 5 17:32:18 mymachine su[12345]: 'su root' failed for lonvick on /dev/pts/8";
const parsed: ParsedSyslogMessage | null = parseSyslogMessage(message);
expect(parsed).not.toBeNull();
// Priority 13 => facility 1 (13 >> 3), severity 5 (13 & 7).
expect(parsed?.priority).toBe(13);
expect(parsed?.severity).toBe(5);
expect(parsed?.facility).toBe(1);
expect(parsed?.hostname).toBe("mymachine");
// TAG "su[12345]" is split into appName and procId.
expect(parsed?.appName).toBe("su");
expect(parsed?.procId).toBe("12345");
expect(parsed?.message).toBe("'su root' failed for lonvick on /dev/pts/8");
expect(parsed?.timestamp).toBeInstanceOf(Date);
});
test("handles message without priority", () => {
// No <PRI> header: metadata fields stay undefined, whole input is the
// message.
const message: string = "Simple message without metadata";
const parsed: ParsedSyslogMessage | null = parseSyslogMessage(message);
expect(parsed).not.toBeNull();
expect(parsed?.priority).toBeUndefined();
expect(parsed?.severity).toBeUndefined();
expect(parsed?.facility).toBeUndefined();
expect(parsed?.message).toBe("Simple message without metadata");
});
});

View File

@@ -1,32 +0,0 @@
{
"preset": "ts-jest",
"testPathIgnorePatterns": [
"node_modules",
"dist"
],
"verbose": true,
"globals": {
"ts-jest": {
"tsconfig": "tsconfig.json",
"babelConfig": false
}
},
"moduleFileExtensions": ["ts", "js", "json"],
"transform": {
".(ts|tsx)": "ts-jest"
},
"testEnvironment": "node",
"collectCoverage": false,
"coverageReporters": ["text", "lcov"],
"testRegex": "./Tests/(.*).test.ts",
"collectCoverageFrom": ["./**/*.(tsx|ts)"],
"coverageThreshold": {
"global": {
"lines": 0,
"functions": 0,
"branches": 0,
"statements": 0
}
}
}

View File

@@ -1,14 +0,0 @@
{
"watch": [
"./",
"../Common/Server",
"../Common/Types",
"../Common/Utils",
"../Common/Models"
],
"ext": "ts,tsx",
"ignore": ["./node_modules/**", "./public/**", "./bin/**", "./build/**"],
"watchOptions": { "useFsEvents": false, "interval": 500 },
"env": { "TS_NODE_TRANSPILE_ONLY": "1", "TS_NODE_FILES": "false" },
"exec": "node --use-openssl-ca -r ts-node/register/transpile-only Index.ts"
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,36 +0,0 @@
{
"name": "@oneuptime/telemetry",
"version": "1.0.0",
"description": "",
"repository": {
"type": "git",
"url": "https://github.com/OneUptime/oneuptime"
},
"main": "index.js",
"scripts": {
"start": "export NODE_OPTIONS='--max-old-space-size=8096 --use-openssl-ca' && node --require ts-node/register Index.ts",
"compile": "tsc",
"clear-modules": "rm -rf node_modules && rm package-lock.json && npm install",
"dev": "NODE_OPTIONS='--use-openssl-ca' npx nodemon",
"audit": "npm audit --audit-level=low",
"dep-check": "npm install -g depcheck && depcheck ./ --skip-missing=true",
"test": "jest --passWithNoTests"
},
"author": "OneUptime <hello@oneuptime.com> (https://oneuptime.com/)",
"license": "Apache-2.0",
"dependencies": {
"@grpc/grpc-js": "^1.12.5",
"@grpc/proto-loader": "^0.7.13",
"Common": "file:../Common",
"ejs": "^3.1.10",
"protobufjs": "^7.3.2",
"ts-node": "^10.9.1"
},
"devDependencies": {
"@types/jest": "^27.5.0",
"@types/node": "^17.0.31",
"jest": "^28.1.0",
"nodemon": "^2.0.20",
"ts-jest": "^28.0.2"
}
}

View File

@@ -1,115 +0,0 @@
{
"ts-node": {
// these options are overrides used only by ts-node
// same as the --compilerOptions flag and the TS_NODE_COMPILER_OPTIONS environment variable
"compilerOptions": {
"module": "commonjs",
"resolveJsonModule": true,
}
},
"compilerOptions": {
/* Visit https://aka.ms/tsconfig.json to read more about this file */
/* Projects */
// "incremental": true, /* Enable incremental compilation */
// "composite": true, /* Enable constraints that allow a TypeScript project to be used with project references. */
// "tsBuildInfoFile": "./", /* Specify the folder for .tsbuildinfo incremental compilation files. */
// "disableSourceOfProjectReferenceRedirect": true, /* Disable preferring source files instead of declaration files when referencing composite projects */
// "disableSolutionSearching": true, /* Opt a project out of multi-project reference checking when editing. */
// "disableReferencedProjectLoad": true, /* Reduce the number of projects loaded automatically by TypeScript. */
/* Language and Environment */
"target": "es2017" /* Set the JavaScript language version for emitted JavaScript and include compatible library declarations. */,
// "lib": [], /* Specify a set of bundled library declaration files that describe the target runtime environment. */
"jsx": "react" /* Specify what JSX code is generated. */,
"experimentalDecorators": true /* Enable experimental support for TC39 stage 2 draft decorators. */,
"emitDecoratorMetadata": true /* Emit design-type metadata for decorated declarations in source files. */,
// "jsxFactory": "", /* Specify the JSX factory function used when targeting React JSX emit, e.g. 'React.createElement' or 'h' */
// "jsxFragmentFactory": "", /* Specify the JSX Fragment reference used for fragments when targeting React JSX emit e.g. 'React.Fragment' or 'Fragment'. */
// "jsxImportSource": "", /* Specify module specifier used to import the JSX factory functions when using `jsx: react-jsx*`.` */
// "reactNamespace": "", /* Specify the object invoked for `createElement`. This only applies when targeting `react` JSX emit. */
// "noLib": true, /* Disable including any library files, including the default lib.d.ts. */
// "useDefineForClassFields": true, /* Emit ECMAScript-standard-compliant class fields. */
/* Modules */
// "module": "es2022" /* Specify what module code is generated. */,
"rootDir": "" /* Specify the root folder within your source files. */,
"moduleResolution": "node" /* Specify how TypeScript looks up a file from a given module specifier. */,
// "baseUrl": "./", /* Specify the base directory to resolve non-relative module names. */
// "paths": {}, /* Specify a set of entries that re-map imports to additional lookup locations. */
// "rootDirs": [], /* Allow multiple folders to be treated as one when resolving modules. */
"typeRoots": [
"./node_modules/@types"
] /* Specify multiple folders that act like `./node_modules/@types`. */,
"types": [
"node",
"jest"
] /* Specify type package names to be included without being referenced in a source file. */,
// "allowUmdGlobalAccess": true, /* Allow accessing UMD globals from modules. */
// "resolveJsonModule": true, /* Enable importing .json files */
// "noResolve": true, /* Disallow `import`s, `require`s or `<reference>`s from expanding the number of files TypeScript should add to a project. */
/* JavaScript Support */
// "allowJs": true, /* Allow JavaScript files to be a part of your program. Use the `checkJS` option to get errors from these files. */
// "checkJs": true, /* Enable error reporting in type-checked JavaScript files. */
// "maxNodeModuleJsDepth": 1, /* Specify the maximum folder depth used for checking JavaScript files from `node_modules`. Only applicable with `allowJs`. */
/* Emit */
// "declaration": true, /* Generate .d.ts files from TypeScript and JavaScript files in your project. */
// "declarationMap": true, /* Create sourcemaps for d.ts files. */
// "emitDeclarationOnly": true, /* Only output d.ts files and not JavaScript files. */
"sourceMap": true /* Create source map files for emitted JavaScript files. */,
// "outFile": "./", /* Specify a file that bundles all outputs into one JavaScript file. If `declaration` is true, also designates a file that bundles all .d.ts output. */
"outDir": "build/dist" /* Specify an output folder for all emitted files. */,
// "removeComments": true, /* Disable emitting comments. */
// "noEmit": true, /* Disable emitting files from a compilation. */
// "importHelpers": true, /* Allow importing helper functions from tslib once per project, instead of including them per-file. */
// "importsNotUsedAsValues": "remove", /* Specify emit/checking behavior for imports that are only used for types */
// "downlevelIteration": true, /* Emit more compliant, but verbose and less performant JavaScript for iteration. */
// "sourceRoot": "", /* Specify the root path for debuggers to find the reference source code. */
// "mapRoot": "", /* Specify the location where debugger should locate map files instead of generated locations. */
// "inlineSourceMap": true, /* Include sourcemap files inside the emitted JavaScript. */
// "inlineSources": true, /* Include source code in the sourcemaps inside the emitted JavaScript. */
// "emitBOM": true, /* Emit a UTF-8 Byte Order Mark (BOM) in the beginning of output files. */
// "newLine": "crlf", /* Set the newline character for emitting files. */
// "stripInternal": true, /* Disable emitting declarations that have `@internal` in their JSDoc comments. */
// "noEmitHelpers": true, /* Disable generating custom helper functions like `__extends` in compiled output. */
// "noEmitOnError": true, /* Disable emitting files if any type checking errors are reported. */
// "preserveConstEnums": true, /* Disable erasing `const enum` declarations in generated code. */
// "declarationDir": "./", /* Specify the output directory for generated declaration files. */
// "preserveValueImports": true, /* Preserve unused imported values in the JavaScript output that would otherwise be removed. */
/* Interop Constraints */
// "isolatedModules": true, /* Ensure that each file can be safely transpiled without relying on other imports. */
// "allowSyntheticDefaultImports": true, /* Allow 'import x from y' when a module doesn't have a default export. */
"esModuleInterop": true /* Emit additional JavaScript to ease support for importing CommonJS modules. This enables `allowSyntheticDefaultImports` for type compatibility. */,
// "preserveSymlinks": true, /* Disable resolving symlinks to their realpath. This correlates to the same flag in node. */
"forceConsistentCasingInFileNames": true /* Ensure that casing is correct in imports. */,
/* Type Checking */
"strict": true /* Enable all strict type-checking options. */,
"noImplicitAny": true /* Enable error reporting for expressions and declarations with an implied `any` type.. */,
"strictNullChecks": true /* When type checking, take into account `null` and `undefined`. */,
"strictFunctionTypes": true /* When assigning functions, check to ensure parameters and the return values are subtype-compatible. */,
"strictBindCallApply": true /* Check that the arguments for `bind`, `call`, and `apply` methods match the original function. */,
"strictPropertyInitialization": true /* Check for class properties that are declared but not set in the constructor. */,
"noImplicitThis": true /* Enable error reporting when `this` is given the type `any`. */,
"useUnknownInCatchVariables": true /* Type catch clause variables as 'unknown' instead of 'any'. */,
"alwaysStrict": true /* Ensure 'use strict' is always emitted. */,
"noUnusedLocals": true /* Enable error reporting when a local variables aren't read. */,
"noUnusedParameters": true /* Raise an error when a function parameter isn't read */,
"exactOptionalPropertyTypes": true /* Interpret optional property types as written, rather than adding 'undefined'. */,
"noImplicitReturns": true /* Enable error reporting for codepaths that do not explicitly return in a function. */,
"noFallthroughCasesInSwitch": true /* Enable error reporting for fallthrough cases in switch statements. */,
"noUncheckedIndexedAccess": true /* Include 'undefined' in index signature results */,
"noImplicitOverride": true /* Ensure overriding members in derived classes are marked with an override modifier. */,
"noPropertyAccessFromIndexSignature": true /* Enforces using indexed accessors for keys declared using an indexed type */,
// "allowUnusedLabels": true, /* Disable error reporting for unused labels. */
// "allowUnreachableCode": true, /* Disable error reporting for unreachable code. */
/* Completeness */
// "skipDefaultLibCheck": true, /* Skip type checking .d.ts files that are included with TypeScript. */
"skipLibCheck": true, /* Skip type checking all .d.ts files. */
"resolveJsonModule": true
}
}

View File

@@ -99,14 +99,14 @@ REDIS_TLS_CA=
REDIS_TLS_SENTINEL_MODE=false
# Hostnames. Usually does not need to change.
TELEMETRY_HOSTNAME=telemetry:3403
TELEMETRY_HOSTNAME=app:3002
SERVER_APP_HOSTNAME=app
SERVER_TELEMETRY_HOSTNAME=telemetry
SERVER_TELEMETRY_HOSTNAME=app
#Ports. Usually they don't need to change.
APP_PORT=3002
TELEMETRY_PORT=3403
TELEMETRY_PORT=3002
TEST_SERVER_PORT=3800
HOME_PORT=1444
# Plans
@@ -290,7 +290,6 @@ AI_AGENT_PORT=3876
# By default telemetry is disabled for all services in docker compose. If you want to enable telemetry for a service, then set the env var to false.
DISABLE_TELEMETRY_FOR_APP=true
DISABLE_TELEMETRY_FOR_TELEMETRY=true
DISABLE_TELEMETRY_FOR_TEST_SERVER=true
DISABLE_TELEMETRY_FOR_PROBE=true
DISABLE_TELEMETRY_FOR_INGRESS=true
@@ -298,7 +297,6 @@ DISABLE_TELEMETRY_FOR_AI_AGENT=true
# By default profiling is disabled for all services. Set to true to enable CPU profiling for a service.
ENABLE_PROFILING_FOR_APP=false
ENABLE_PROFILING_FOR_TELEMETRY=false
ENABLE_PROFILING_FOR_TEST_SERVER=false
ENABLE_PROFILING_FOR_PROBE=false
ENABLE_PROFILING_FOR_AI_AGENT=false

View File

@@ -30,12 +30,12 @@ x-common-variables: &common-variables
ALLOWED_ACTIVE_MONITOR_COUNT_IN_FREE_PLAN: ${ALLOWED_ACTIVE_MONITOR_COUNT_IN_FREE_PLAN}
SERVER_APP_HOSTNAME: app
SERVER_TELEMETRY_HOSTNAME: telemetry
SERVER_TELEMETRY_HOSTNAME: app
SERVER_HOME_HOSTNAME: home
#Ports. Usually they don't need to change.
APP_PORT: ${APP_PORT}
HOME_PORT: ${HOME_PORT}
TELEMETRY_PORT: ${TELEMETRY_PORT}
TELEMETRY_PORT: ${APP_PORT}
OPENTELEMETRY_EXPORTER_OTLP_ENDPOINT: ${OPENTELEMETRY_EXPORTER_OTLP_ENDPOINT}
OPENTELEMETRY_EXPORTER_OTLP_HEADERS: ${OPENTELEMETRY_EXPORTER_OTLP_HEADERS}
@@ -243,6 +243,9 @@ services:
DISABLE_TELEMETRY: ${DISABLE_TELEMETRY_FOR_APP}
ENABLE_PROFILING: ${ENABLE_PROFILING_FOR_APP}
WORKER_CONCURRENCY: ${WORKER_CONCURRENCY}
# Telemetry ingestion settings (merged from telemetry service)
TELEMETRY_CONCURRENCY: ${TELEMETRY_CONCURRENCY}
REGISTER_PROBE_KEY: ${REGISTER_PROBE_KEY}
logging:
driver: "local"
options:
@@ -343,23 +346,6 @@ services:
options:
max-size: "1000m"
telemetry:
networks:
- oneuptime
restart: always
environment:
<<: *common-runtime-variables
PORT: ${TELEMETRY_PORT}
DISABLE_TELEMETRY: ${DISABLE_TELEMETRY_FOR_TELEMETRY}
ENABLE_PROFILING: ${ENABLE_PROFILING_FOR_TELEMETRY}
# Max concurrent telemetry jobs the worker will process
TELEMETRY_CONCURRENCY: ${TELEMETRY_CONCURRENCY}
REGISTER_PROBE_KEY: ${REGISTER_PROBE_KEY}
logging:
driver: "local"
options:
max-size: "1000m"
e2e:
restart: "no"
network_mode: host # This is needed to access the host network,

View File

@@ -53,14 +53,6 @@ services:
depends_on:
<<: *common-depends-on
telemetry:
image: oneuptime/telemetry:${APP_TAG}
extends:
file: ./docker-compose.base.yml
service: telemetry
depends_on:
<<: *common-depends-on
ingress:
image: oneuptime/nginx:${APP_TAG}
extends:

View File

@@ -173,26 +173,6 @@ services:
context: .
dockerfile: ./AIAgent/Dockerfile
telemetry:
volumes:
- ./Telemetry:/usr/src/app:cached
# Use node modules of the container and not host system.
# https://stackoverflow.com/questions/29181032/add-a-volume-to-docker-but-exclude-a-sub-folder
- /usr/src/app/node_modules/
- ./Common:/usr/src/Common:cached
- /usr/src/Common/node_modules/
ports:
- '9938:9229' # Debugging port.
extends:
file: ./docker-compose.base.yml
service: telemetry
depends_on:
<<: *common-depends-on
build:
network: host
context: .
dockerfile: ./Telemetry/Dockerfile
# Fluentd. Required only for development. In production its the responsibility of the customer to run fluentd and pipe logs to OneUptime.
# We run this container just for development, to see if logs are piped.

View File

@@ -45,14 +45,6 @@ services:
depends_on:
<<: *common-depends-on
telemetry:
image: oneuptime/telemetry:${APP_TAG}
extends:
file: ./docker-compose.base.yml
service: telemetry
depends_on:
<<: *common-depends-on
ingress:
image: oneuptime/nginx:${APP_TAG}
extends: