diff --git a/.vscode/launch.json b/.vscode/launch.json index 484b06f87f..2c2dbb3503 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -77,6 +77,20 @@ "restart": true, "autoAttachChildProcesses": true }, + { + "address": "127.0.0.1", + "localRoot": "${workspaceFolder}/Worker", + "name": "Worker: Debug with Docker", + "port": 8734, + "remoteRoot": "/usr/src/app", + "request": "attach", + "skipFiles": [ + "/**" + ], + "type": "node", + "restart": true, + "autoAttachChildProcesses": true + }, { "address": "127.0.0.1", "localRoot": "${workspaceFolder}/TestServer", diff --git a/Common/Server/EnvironmentConfig.ts b/Common/Server/EnvironmentConfig.ts index 840ee1f789..e9724cc75a 100644 --- a/Common/Server/EnvironmentConfig.ts +++ b/Common/Server/EnvironmentConfig.ts @@ -100,6 +100,12 @@ export const IsolatedVMHostname: Hostname = Hostname.fromString( }`, ); +export const WorkerHostname: Hostname = Hostname.fromString( + `${process.env["SERVER_WORKER_HOSTNAME"] || "localhost"}:${ + process.env["WORKER_PORT"] || 80 + }`, +); + export const HomeHostname: Hostname = Hostname.fromString( `${process.env["SERVER_HOME_HOSTNAME"] || "localhost"}:${ process.env["HOME_PORT"] || 80 diff --git a/HelmChart/Public/oneuptime/templates/_helpers.tpl b/HelmChart/Public/oneuptime/templates/_helpers.tpl index ab7d884289..326b6288cd 100644 --- a/HelmChart/Public/oneuptime/templates/_helpers.tpl +++ b/HelmChart/Public/oneuptime/templates/_helpers.tpl @@ -48,6 +48,8 @@ Usage: value: {{ $.Release.Name }}-accounts.{{ $.Release.Namespace }}.svc.{{ $.Values.global.clusterDomain }} - name: SERVER_ISOLATED_VM_HOSTNAME value: {{ $.Release.Name }}-isolated-vm.{{ $.Release.Namespace }}.svc.{{ $.Values.global.clusterDomain }} +- name: SERVER_WORKER_HOSTNAME + value: {{ $.Release.Name }}-worker.{{ $.Release.Namespace }}.svc.{{ $.Values.global.clusterDomain }} - name: SERVER_HOME_HOSTNAME value: {{ $.Release.Name }}-home.{{ $.Release.Namespace }}.svc.{{ $.Values.global.clusterDomain }} - name: 
SERVER_APP_HOSTNAME @@ -79,6 +81,8 @@ Usage: value: {{ $.Values.port.isolatedVM | squote }} - name: HOME_PORT value: {{ $.Values.port.home | squote }} +- name: WORKER_PORT + value: {{ $.Values.port.worker | squote }} - name: STATUS_PAGE_PORT value: {{ $.Values.port.statusPage | squote }} - name: DASHBOARD_PORT diff --git a/HelmChart/Public/oneuptime/templates/worker.yaml b/HelmChart/Public/oneuptime/templates/worker.yaml new file mode 100644 index 0000000000..42a0fcf4e0 --- /dev/null +++ b/HelmChart/Public/oneuptime/templates/worker.yaml @@ -0,0 +1,85 @@ +# OneUptime worker Deployment +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ printf "%s-%s" $.Release.Name "worker" }} + namespace: {{ $.Release.Namespace }} + labels: + app: {{ printf "%s-%s" $.Release.Name "worker" }} + app.kubernetes.io/part-of: oneuptime + app.kubernetes.io/managed-by: Helm + appname: oneuptime + date: "{{ now | unixEpoch }}" +spec: + selector: + matchLabels: + app: {{ printf "%s-%s" $.Release.Name "worker" }} + replicas: {{ $.Values.deployment.replicaCount }} + template: + metadata: + labels: + app: {{ printf "%s-%s" $.Release.Name "worker" }} + date: "{{ now | unixEpoch }}" + appname: oneuptime + spec: + {{- if $.Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml $.Values.imagePullSecrets | nindent 8 }} + {{- end }} + {{- if $.Values.podSecurityContext }} + securityContext: {{- $.Values.podSecurityContext | toYaml | nindent 8 }} + {{- end }} + {{- if $.Values.affinity }} + affinity: {{- $.Values.affinity | toYaml | nindent 8 }} + {{- end }} + {{- if $.Values.tolerations }} + tolerations: {{- $.Values.tolerations | toYaml | nindent 8 }} + {{- end }} + {{- if $.Values.nodeSelector }} + nodeSelector: {{- $.Values.nodeSelector | toYaml | nindent 8 }} + {{- end }} + containers: + - image: {{ printf "%s/%s/%s:%s" $.Values.image.registry $.Values.image.repository "worker" $.Values.image.tag }} + name: {{ printf "%s-%s" $.Release.Name "worker" }} + # Liveness probe + 
livenessProbe: + httpGet: + path: /status/live + port: {{ $.Values.port.worker }} + initialDelaySeconds: 300 + periodSeconds: 10 + timeoutSeconds: 30 + # Readiness Probe + readinessProbe: + httpGet: + path: /status/ready + port: {{ $.Values.port.worker }} + initialDelaySeconds: 300 + periodSeconds: 10 + timeoutSeconds: 30 + {{- if $.Values.containerSecurityContext }} + securityContext: {{- $.Values.containerSecurityContext | toYaml | nindent 12 }} + {{- end }} + imagePullPolicy: {{ $.Values.image.pullPolicy }} + env: + {{- include "oneuptime.env.common" . | nindent 12 }} + {{- include "oneuptime.env.commonServer" . | nindent 12 }} + {{- include "oneuptime.env.oneuptimeSecret" . | nindent 12 }} + ports: + - containerPort: {{ $.Values.port.worker }} + protocol: TCP + name: http + restartPolicy: {{ $.Values.image.restartPolicy }} + +--- + +# OneUptime worker Service +{{- $workerPorts := dict "port" $.Values.port.worker -}} +{{- $workerServiceArgs := dict "ServiceName" "worker" "Ports" $workerPorts "Release" $.Release "Values" $.Values -}} +{{- include "oneuptime.service" $workerServiceArgs }} +--- + +# OneUptime worker autoscaler
{{- $workerAutoScalerArgs := dict "ServiceName" "worker" "Release" $.Release "Values" $.Values -}} +{{- include "oneuptime.autoscaler" $workerAutoScalerArgs }} +--- \ No newline at end of file diff --git a/HelmChart/Public/oneuptime/values.yaml b/HelmChart/Public/oneuptime/values.yaml index ad17c0875e..e62d9eb410 100644 --- a/HelmChart/Public/oneuptime/values.yaml +++ b/HelmChart/Public/oneuptime/values.yaml @@ -208,6 +208,7 @@ port: otelCollectorHttp: 4318 isolatedVM: 4572 home: 1444 + worker: 1445 testServer: diff --git a/Nginx/default.conf.template b/Nginx/default.conf.template index 155de0f6c1..8620c85733 100644 --- a/Nginx/default.conf.template +++ b/Nginx/default.conf.template @@ -22,6 +22,10 @@ upstream isolated-vm { server ${SERVER_ISOLATED_VM_HOSTNAME}:${ISOLATED_VM_PORT} weight=10 max_fails=3 fail_timeout=30s; } +upstream worker { + 
server ${SERVER_WORKER_HOSTNAME}:${WORKER_PORT} weight=10 max_fails=3 fail_timeout=30s; +} + upstream home { server ${SERVER_HOME_HOSTNAME}:${HOME_PORT} weight=10 max_fails=3 fail_timeout=30s; } @@ -597,6 +601,21 @@ server { proxy_pass http://isolated-vm; } + location /worker { + # This is for nginx not to crash when service is not available. + resolver 127.0.0.1 valid=30s; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + + # enable WebSockets (for ws://sockjs not connected error in the accounts source: https://stackoverflow.com/questions/41381444/websocket-connection-failed-error-during-websocket-handshake-unexpected-respon) + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "upgrade"; + proxy_pass http://worker; + } + location /status-page { # This is for nginx not to crash when service is not available. resolver 127.0.0.1 valid=30s; diff --git a/config.example.env b/config.example.env index f709ce8bce..49a1a4e653 100644 --- a/config.example.env +++ b/config.example.env @@ -115,7 +115,8 @@ DASHBOARD_PORT=3009 ADMIN_DASHBOARD_PORT=3158 OTEL_COLLECTOR_HTTP_PORT=4318 ISOLATED_VM_PORT=4572 - +HOME_PORT=1444 +WORKER_PORT=1445 # If USE_INTERNAL_SMTP is true then you need to fill these values. INTERNAL_SMTP_FROM_NAME=OneUptime diff --git a/docker-compose.base.yml b/docker-compose.base.yml index 5b6f3d425c..fc245d67e6 100644 --- a/docker-compose.base.yml +++ b/docker-compose.base.yml @@ -34,6 +34,7 @@ x-common-variables: &common-variables SERVER_ADMIN_DASHBOARD_HOSTNAME: admin-dashboard SERVER_OTEL_COLLECTOR_HOSTNAME: otel-collector SERVER_ISOLATED_VM_HOSTNAME: isolated-vm + SERVER_WORKER_HOSTNAME: worker SERVER_HOME_HOSTNAME: home #Ports. Usually they don't need to change. 
@@ -47,6 +48,7 @@ x-common-variables: &common-variables DASHBOARD_PORT: ${DASHBOARD_PORT} ADMIN_DASHBOARD_PORT: ${ADMIN_DASHBOARD_PORT} ISOLATED_VM_PORT: ${ISOLATED_VM_PORT} + WORKER_PORT: ${WORKER_PORT} OPENTELEMETRY_EXPORTER_OTLP_ENDPOINT: ${OPENTELEMETRY_EXPORTER_OTLP_ENDPOINT} OPENTELEMETRY_EXPORTER_OTLP_HEADERS: ${OPENTELEMETRY_EXPORTER_OTLP_HEADERS} @@ -288,6 +290,18 @@ services: options: max-size: "1000m" + worker: + networks: + - oneuptime + restart: always + environment: + <<: *common-server-variables + PORT: ${WORKER_PORT} + logging: + driver: "local" + options: + max-size: "1000m" + probe-1: networks: - oneuptime diff --git a/docker-compose.dev.yml b/docker-compose.dev.yml index 58ccb1455b..959bc4055d 100644 --- a/docker-compose.dev.yml +++ b/docker-compose.dev.yml @@ -178,6 +178,24 @@ services: context: . dockerfile: ./Home/Dockerfile + worker: + volumes: + - ./Worker:/usr/src/app + # Use node modules of the container and not host system. + # https://stackoverflow.com/questions/29181032/add-a-volume-to-docker-but-exclude-a-sub-folder + - /usr/src/app/node_modules/ + - ./Common:/usr/src/Common + - /usr/src/Common/node_modules/ + extends: + file: ./docker-compose.base.yml + service: worker + ports: + - '8734:9229' # Debugging port. + build: + network: host + context: . + dockerfile: ./Worker/Dockerfile + app: volumes: - ./App:/usr/src/app diff --git a/docker-compose.yml b/docker-compose.yml index 94e62825f8..9d166c2bd4 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -64,6 +64,12 @@ services: extends: file: ./docker-compose.base.yml service: app + + worker: + image: oneuptime/worker:${APP_TAG} + extends: + file: ./docker-compose.base.yml + service: worker home: image: oneuptime/home:${APP_TAG}